/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     119
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 18, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
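
/* Illustrative example (not part of the original source): because
 * TG3_TX_RING_SIZE is a power of two (512), NEXT_TX() wraps the ring
 * index with a mask rather than a modulo:
 *
 *      NEXT_TX(510) == 511
 *      NEXT_TX(511) == (512 & 511) == 0
 *
 * This is exactly the '% foo' -> '& (foo - 1)' rewrite described in
 * the comment above.
 */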

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
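
/* Illustrative note (not part of the original source): on
 * configurations where unaligned access is cheap, TG3_RX_COPY_THRESH()
 * resolves to the compile-time constant 256, so a check such as
 *
 *      if (len < TG3_RX_COPY_THRESH(tp))
 *              ... copy the packet into a fresh, aligned skb ...
 *
 * compares against an immediate instead of dereferencing tp, which is
 * the saved "device structure dereference" the comment above mentions.
 */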

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
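
/* Illustrative usage (not part of the original source): tw32_f()
 * flushes a posted write by reading the register back, and
 * tw32_wait_f() additionally enforces a settle time.  For example,
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * writes the register and guarantees at least 40 usec elapse before
 * the caller continues (see _tw32_flush() above); tg3_switch_clocks()
 * below uses exactly this pattern.
 */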

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = 0; i < 8; i++) {
                if (i == TG3_APE_LOCK_GPIO)
                        continue;
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
        }

        /* Clear the correct bit of the GPIO lock too. */
        if (!tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
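                /* fall through */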
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_REQ_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
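                /* fall through */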
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
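
/* Illustrative usage (not part of the original source): callers
 * bracket accesses to resources shared with the APE firmware, e.g.
 *
 *      if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *              ... touch the shared resource ...
 *              tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *      }
 */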

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
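
/* Illustrative note (not part of the original source): both PHY
 * accessors above hand-build the MII management frame in MAC_MI_COM.
 * Conceptually, for a read:
 *
 *      frame = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK) |
 *              ((reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK) |
 *              MI_COM_CMD_READ | MI_COM_START;
 *
 * after which the busy-wait loop polls MI_COM_BUSY for up to
 * PHY_BUSY_LOOPS iterations before giving up with -EBUSY.
 */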

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
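
/* Illustrative note (not part of the original source): the two
 * helpers above implement the usual Clause 45-over-Clause 22 indirect
 * MMD access sequence:
 *
 *      1. write the MMD device address (devad) to MII_TG3_MMD_CTRL
 *      2. write the target register offset to MII_TG3_MMD_ADDRESS
 *      3. switch MII_TG3_MMD_CTRL to no-post-increment data mode
 *      4. read or write the data through MII_TG3_MMD_ADDRESS
 */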

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
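
/* Illustrative arithmetic (not part of the original source): the poll
 * loop above steps in 8 usec units, so delay_cnt = (delay_cnt >> 3) + 1
 * converts a usec budget into a poll count.  A full budget of
 * TG3_FW_EVENT_TIMEOUT_USEC (2500) yields (2500 >> 3) + 1 = 313
 * iterations of udelay(8), i.e. roughly the 2.5 ms window.
 */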

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}
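
/* Illustrative summary (not part of the original source): the
 * resolution above follows the IEEE 802.3 pause negotiation rules for
 * 1000BASE-X:
 *
 *      local PAUSE  local ASYM | partner PAUSE  partner ASYM | result
 *           1           x      |      1              x       | TX+RX
 *           1           1      |      0              1       | RX
 *           0           1      |      1              1       | TX
 *      anything else                                         | none
 */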
1472
1473 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1474 {
1475         u8 autoneg;
1476         u8 flowctrl = 0;
1477         u32 old_rx_mode = tp->rx_mode;
1478         u32 old_tx_mode = tp->tx_mode;
1479
1480         if (tg3_flag(tp, USE_PHYLIB))
1481                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1482         else
1483                 autoneg = tp->link_config.autoneg;
1484
1485         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1486                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1487                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1488                 else
1489                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1490         } else
1491                 flowctrl = tp->link_config.flowctrl;
1492
1493         tp->link_config.active_flowctrl = flowctrl;
1494
1495         if (flowctrl & FLOW_CTRL_RX)
1496                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1497         else
1498                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1499
1500         if (old_rx_mode != tp->rx_mode)
1501                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1502
1503         if (flowctrl & FLOW_CTRL_TX)
1504                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1505         else
1506                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1507
1508         if (old_tx_mode != tp->tx_mode)
1509                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1510 }
1511
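/* Link-change callback handed to phy_connect() in tg3_phy_init()
 * below; phylib invokes it whenever the PHY state changes.  Under
 * tp->lock it reprograms the MAC port mode (MII vs. GMII), duplex,
 * flow control and IPG/slot-time to match the PHY, and emits a link
 * report only if something user-visible changed.
 */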
1512 static void tg3_adjust_link(struct net_device *dev)
1513 {
1514         u8 oldflowctrl, linkmesg = 0;
1515         u32 mac_mode, lcl_adv, rmt_adv;
1516         struct tg3 *tp = netdev_priv(dev);
1517         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1518
1519         spin_lock_bh(&tp->lock);
1520
1521         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1522                                     MAC_MODE_HALF_DUPLEX);
1523
1524         oldflowctrl = tp->link_config.active_flowctrl;
1525
1526         if (phydev->link) {
1527                 lcl_adv = 0;
1528                 rmt_adv = 0;
1529
1530                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1531                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1532                 else if (phydev->speed == SPEED_1000 ||
1533                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1534                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1535                 else
1536                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1537
1538                 if (phydev->duplex == DUPLEX_HALF)
1539                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1540                 else {
1541                         lcl_adv = tg3_advert_flowctrl_1000T(
1542                                   tp->link_config.flowctrl);
1543
1544                         if (phydev->pause)
1545                                 rmt_adv = LPA_PAUSE_CAP;
1546                         if (phydev->asym_pause)
1547                                 rmt_adv |= LPA_PAUSE_ASYM;
1548                 }
1549
1550                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1551         } else
1552                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1553
1554         if (mac_mode != tp->mac_mode) {
1555                 tp->mac_mode = mac_mode;
1556                 tw32_f(MAC_MODE, tp->mac_mode);
1557                 udelay(40);
1558         }
1559
1560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1561                 if (phydev->speed == SPEED_10)
1562                         tw32(MAC_MI_STAT,
1563                              MAC_MI_STAT_10MBPS_MODE |
1564                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1565                 else
1566                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1567         }
1568
1569         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1570                 tw32(MAC_TX_LENGTHS,
1571                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1572                       (6 << TX_LENGTHS_IPG_SHIFT) |
1573                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1574         else
1575                 tw32(MAC_TX_LENGTHS,
1576                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1577                       (6 << TX_LENGTHS_IPG_SHIFT) |
1578                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1579
1580         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1581             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1582             phydev->speed != tp->link_config.active_speed ||
1583             phydev->duplex != tp->link_config.active_duplex ||
1584             oldflowctrl != tp->link_config.active_flowctrl)
1585                 linkmesg = 1;
1586
1587         tp->link_config.active_speed = phydev->speed;
1588         tp->link_config.active_duplex = phydev->duplex;
1589
1590         spin_unlock_bh(&tp->lock);
1591
1592         if (linkmesg)
1593                 tg3_link_report(tp);
1594 }
1595
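/* Attach the MAC to its PHY via phylib.  The PHY's supported mask is
 * clamped to what the MAC can do for the interface mode in use
 * (gigabit on GMII/RGMII unless the part is 10/100-only, basic
 * features on MII), and advertising starts out equal to supported.
 */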
1596 static int tg3_phy_init(struct tg3 *tp)
1597 {
1598         struct phy_device *phydev;
1599
1600         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1601                 return 0;
1602
1603         /* Bring the PHY back to a known state. */
1604         tg3_bmcr_reset(tp);
1605
1606         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1607
1608         /* Attach the MAC to the PHY. */
1609         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1610                              phydev->dev_flags, phydev->interface);
1611         if (IS_ERR(phydev)) {
1612                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1613                 return PTR_ERR(phydev);
1614         }
1615
1616         /* Mask with MAC supported features. */
1617         switch (phydev->interface) {
1618         case PHY_INTERFACE_MODE_GMII:
1619         case PHY_INTERFACE_MODE_RGMII:
1620                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1621                         phydev->supported &= (PHY_GBIT_FEATURES |
1622                                               SUPPORTED_Pause |
1623                                               SUPPORTED_Asym_Pause);
1624                         break;
1625                 }
1626                 /* fallthru */
1627         case PHY_INTERFACE_MODE_MII:
1628                 phydev->supported &= (PHY_BASIC_FEATURES |
1629                                       SUPPORTED_Pause |
1630                                       SUPPORTED_Asym_Pause);
1631                 break;
1632         default:
1633                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1634                 return -EINVAL;
1635         }
1636
1637         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1638
1639         phydev->advertising = phydev->supported;
1640
1641         return 0;
1642 }
1643
1644 static void tg3_phy_start(struct tg3 *tp)
1645 {
1646         struct phy_device *phydev;
1647
1648         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1649                 return;
1650
1651         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1652
1653         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1654                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1655                 phydev->speed = tp->link_config.orig_speed;
1656                 phydev->duplex = tp->link_config.orig_duplex;
1657                 phydev->autoneg = tp->link_config.orig_autoneg;
1658                 phydev->advertising = tp->link_config.orig_advertising;
1659         }
1660
1661         phy_start(phydev);
1662
1663         phy_start_aneg(phydev);
1664 }
1665
1666 static void tg3_phy_stop(struct tg3 *tp)
1667 {
1668         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1669                 return;
1670
1671         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1672 }
1673
1674 static void tg3_phy_fini(struct tg3 *tp)
1675 {
1676         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1677                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1678                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1679         }
1680 }
1681
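/* FET-style PHYs hide extra registers behind a shadow bank: writing
 * MII_TG3_FET_TEST with MII_TG3_FET_SHADOW_EN set exposes them, after
 * which they can be read-modify-written like ordinary MII registers;
 * the original MII_TG3_FET_TEST value is restored afterwards.  The
 * helper below uses that pattern to flip the auto power-down (APD)
 * bit.
 */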
1682 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1683 {
1684         u32 phytest;
1685
1686         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1687                 u32 phy;
1688
1689                 tg3_writephy(tp, MII_TG3_FET_TEST,
1690                              phytest | MII_TG3_FET_SHADOW_EN);
1691                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1692                         if (enable)
1693                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1694                         else
1695                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1696                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1697                 }
1698                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1699         }
1700 }
1701
1702 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1703 {
1704         u32 reg;
1705
1706         if (!tg3_flag(tp, 5705_PLUS) ||
1707             (tg3_flag(tp, 5717_PLUS) &&
1708              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1709                 return;
1710
1711         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1712                 tg3_phy_fet_toggle_apd(tp, enable);
1713                 return;
1714         }
1715
1716         reg = MII_TG3_MISC_SHDW_WREN |
1717               MII_TG3_MISC_SHDW_SCR5_SEL |
1718               MII_TG3_MISC_SHDW_SCR5_LPED |
1719               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1720               MII_TG3_MISC_SHDW_SCR5_SDTL |
1721               MII_TG3_MISC_SHDW_SCR5_C125OE;
1722         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1723                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1724
1725         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1726
1727
1728         reg = MII_TG3_MISC_SHDW_WREN |
1729               MII_TG3_MISC_SHDW_APD_SEL |
1730               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1731         if (enable)
1732                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1733
1734         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1735 }
1736
1737 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1738 {
1739         u32 phy;
1740
1741         if (!tg3_flag(tp, 5705_PLUS) ||
1742             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1743                 return;
1744
1745         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1746                 u32 ephy;
1747
1748                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1749                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1750
1751                         tg3_writephy(tp, MII_TG3_FET_TEST,
1752                                      ephy | MII_TG3_FET_SHADOW_EN);
1753                         if (!tg3_readphy(tp, reg, &phy)) {
1754                                 if (enable)
1755                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1756                                 else
1757                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1758                                 tg3_writephy(tp, reg, phy);
1759                         }
1760                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1761                 }
1762         } else {
1763                 int ret;
1764
1765                 ret = tg3_phy_auxctl_read(tp,
1766                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1767                 if (!ret) {
1768                         if (enable)
1769                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1770                         else
1771                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1772                         tg3_phy_auxctl_write(tp,
1773                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1774                 }
1775         }
1776 }
1777
1778 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1779 {
1780         int ret;
1781         u32 val;
1782
1783         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1784                 return;
1785
1786         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1787         if (!ret)
1788                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1789                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1790 }
1791
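/* tp->phy_otp caches analog trim values (populated elsewhere from the
 * chip's one-time-programmable fuses).  Each field is masked and
 * shifted out of the 32-bit word and written to the matching PHY DSP
 * tap, e.g. for the AGC target:
 *
 *      phy = (otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT;
 *
 * The writes are bracketed by SMDSP enable/disable so that the DSP
 * registers are accessible.
 */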
1792 static void tg3_phy_apply_otp(struct tg3 *tp)
1793 {
1794         u32 otp, phy;
1795
1796         if (!tp->phy_otp)
1797                 return;
1798
1799         otp = tp->phy_otp;
1800
1801         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1802                 return;
1803
1804         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1805         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1806         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1807
1808         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1809               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1810         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1811
1812         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1813         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1814         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1815
1816         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1817         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1818
1819         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1820         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1821
1822         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1823               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1824         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1825
1826         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1827 }
1828
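/* Energy Efficient Ethernet (IEEE 802.3az) bookkeeping.  setlpicnt
 * works as a countdown: it is set to 2 here once the clause-45 EEE
 * resolution status shows the link partner agreed to EEE at
 * 100TX/1000T, and is counted down elsewhere (the driver's timer)
 * before LPI is finally enabled.  If EEE did not resolve, LPI is
 * forced off below.
 */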
1829 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1830 {
1831         u32 val;
1832
1833         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1834                 return;
1835
1836         tp->setlpicnt = 0;
1837
1838         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1839             current_link_up == 1 &&
1840             tp->link_config.active_duplex == DUPLEX_FULL &&
1841             (tp->link_config.active_speed == SPEED_100 ||
1842              tp->link_config.active_speed == SPEED_1000)) {
1843                 u32 eeectl;
1844
1845                 if (tp->link_config.active_speed == SPEED_1000)
1846                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1847                 else
1848                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1849
1850                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1851
1852                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1853                                   TG3_CL45_D7_EEERES_STAT, &val);
1854
1855                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1856                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1857                         tp->setlpicnt = 2;
1858         }
1859
1860         if (!tp->setlpicnt) {
1861                 if (current_link_up == 1 &&
1862                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1863                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1864                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1865                 }
1866
1867                 val = tr32(TG3_CPMU_EEE_MODE);
1868                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1869         }
1870 }
1871
1872 static void tg3_phy_eee_enable(struct tg3 *tp)
1873 {
1874         u32 val;
1875
1876         if (tp->link_config.active_speed == SPEED_1000 &&
1877             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1878              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1879              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1880             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1881                 val = MII_TG3_DSP_TAP26_ALNOKO |
1882                       MII_TG3_DSP_TAP26_RMRXSTO;
1883                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
1884                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1885         }
1886
1887         val = tr32(TG3_CPMU_EEE_MODE);
1888         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1889 }
1890
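/* Poll the DSP control register until the macro-busy bit (0x1000)
 * clears.  There is no udelay() here; each iteration is paced by the
 * MII read itself.
 */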
1891 static int tg3_wait_macro_done(struct tg3 *tp)
1892 {
1893         int limit = 100;
1894
1895         while (limit--) {
1896                 u32 tmp32;
1897
1898                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1899                         if ((tmp32 & 0x1000) == 0)
1900                                 break;
1901                 }
1902         }
1903         if (limit < 0)
1904                 return -EBUSY;
1905
1906         return 0;
1907 }
1908
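/* Write a known test pattern into each of the four DSP channels (six
 * 32-bit words per channel through MII_TG3_DSP_RW_PORT), then read it
 * back, comparing the low 15 bits of the even words and the low 4
 * bits of the odd words.  Any mismatch or stuck macro asks the caller
 * (via *resetp) to reset the PHY and try again.
 */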
1909 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1910 {
1911         static const u32 test_pat[4][6] = {
1912         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1913         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1914         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1915         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1916         };
1917         int chan;
1918
1919         for (chan = 0; chan < 4; chan++) {
1920                 int i;
1921
1922                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1923                              (chan * 0x2000) | 0x0200);
1924                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1925
1926                 for (i = 0; i < 6; i++)
1927                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1928                                      test_pat[chan][i]);
1929
1930                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1931                 if (tg3_wait_macro_done(tp)) {
1932                         *resetp = 1;
1933                         return -EBUSY;
1934                 }
1935
1936                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1937                              (chan * 0x2000) | 0x0200);
1938                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1939                 if (tg3_wait_macro_done(tp)) {
1940                         *resetp = 1;
1941                         return -EBUSY;
1942                 }
1943
1944                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1945                 if (tg3_wait_macro_done(tp)) {
1946                         *resetp = 1;
1947                         return -EBUSY;
1948                 }
1949
1950                 for (i = 0; i < 6; i += 2) {
1951                         u32 low, high;
1952
1953                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1954                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1955                             tg3_wait_macro_done(tp)) {
1956                                 *resetp = 1;
1957                                 return -EBUSY;
1958                         }
1959                         low &= 0x7fff;
1960                         high &= 0x000f;
1961                         if (low != test_pat[chan][i] ||
1962                             high != test_pat[chan][i+1]) {
1963                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1964                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1965                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1966
1967                                 return -EBUSY;
1968                         }
1969                 }
1970         }
1971
1972         return 0;
1973 }
1974
1975 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1976 {
1977         int chan;
1978
1979         for (chan = 0; chan < 4; chan++) {
1980                 int i;
1981
1982                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1983                              (chan * 0x2000) | 0x0200);
1984                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1985                 for (i = 0; i < 6; i++)
1986                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1987                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1988                 if (tg3_wait_macro_done(tp))
1989                         return -EBUSY;
1990         }
1991
1992         return 0;
1993 }
1994
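/* PHY reset workaround for the 5703/5704/5705: force 1000/full
 * master mode, write the DSP test pattern, and retry (up to ten
 * times, re-resetting the PHY after a failure) until the pattern
 * reads back clean, then restore the transmitter, interrupt and
 * MII_CTRL1000 settings.
 */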
1995 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1996 {
1997         u32 reg32, phy9_orig;
1998         int retries, do_phy_reset, err;
1999
2000         retries = 10;
2001         do_phy_reset = 1;
2002         do {
2003                 if (do_phy_reset) {
2004                         err = tg3_bmcr_reset(tp);
2005                         if (err)
2006                                 return err;
2007                         do_phy_reset = 0;
2008                 }
2009
2010                 /* Disable transmitter and interrupt.  */
2011                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2012                         continue;
2013
2014                 reg32 |= 0x3000;
2015                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2016
2017                 /* Set full-duplex, 1000 Mbps.  */
2018                 tg3_writephy(tp, MII_BMCR,
2019                              BMCR_FULLDPLX | BMCR_SPEED1000);
2020
2021                 /* Set to master mode.  */
2022                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2023                         continue;
2024
2025                 tg3_writephy(tp, MII_CTRL1000,
2026                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2027
2028                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2029                 if (err)
2030                         return err;
2031
2032                 /* Block the PHY control access.  */
2033                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2034
2035                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2036                 if (!err)
2037                         break;
2038         } while (--retries);
2039
2040         err = tg3_phy_reset_chanpat(tp);
2041         if (err)
2042                 return err;
2043
2044         tg3_phydsp_write(tp, 0x8005, 0x0000);
2045
2046         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2047         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2048
2049         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2050
2051         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2052
2053         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2054                 reg32 &= ~0x3000;
2055                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2056         } else if (!err)
2057                 err = -EBUSY;
2058
2059         return err;
2060 }
2061
2062 /* Reset the tigon3 PHY and apply the chip-specific workarounds
2063  * needed to bring it back to a usable state.
2064  */
2065 static int tg3_phy_reset(struct tg3 *tp)
2066 {
2067         u32 val, cpmuctrl;
2068         int err;
2069
2070         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2071                 val = tr32(GRC_MISC_CFG);
2072                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2073                 udelay(40);
2074         }
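        /* BMSR latches link-down events; read it twice so that the
         * second read reflects the current link state.
         */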
2075         err  = tg3_readphy(tp, MII_BMSR, &val);
2076         err |= tg3_readphy(tp, MII_BMSR, &val);
2077         if (err != 0)
2078                 return -EBUSY;
2079
2080         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2081                 netif_carrier_off(tp->dev);
2082                 tg3_link_report(tp);
2083         }
2084
2085         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2086             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2087             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2088                 err = tg3_phy_reset_5703_4_5(tp);
2089                 if (err)
2090                         return err;
2091                 goto out;
2092         }
2093
2094         cpmuctrl = 0;
2095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2096             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2097                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2098                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2099                         tw32(TG3_CPMU_CTRL,
2100                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2101         }
2102
2103         err = tg3_bmcr_reset(tp);
2104         if (err)
2105                 return err;
2106
2107         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2108                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2109                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2110
2111                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2112         }
2113
2114         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2115             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2116                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2117                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2118                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2119                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2120                         udelay(40);
2121                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2122                 }
2123         }
2124
2125         if (tg3_flag(tp, 5717_PLUS) &&
2126             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2127                 return 0;
2128
2129         tg3_phy_apply_otp(tp);
2130
2131         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2132                 tg3_phy_toggle_apd(tp, true);
2133         else
2134                 tg3_phy_toggle_apd(tp, false);
2135
2136 out:
2137         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2138             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2139                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2140                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2141                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2142         }
2143
2144         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2145                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2146                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2147         }
2148
2149         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2150                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2151                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2152                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2153                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2154                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2155                 }
2156         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2157                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2158                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2159                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2160                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2161                                 tg3_writephy(tp, MII_TG3_TEST1,
2162                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2163                         } else
2164                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2165
2166                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2167                 }
2168         }
2169
2170         /* Set Extended packet length bit (bit 14) on all chips
2171          * that support jumbo frames.  */
2172         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2173                 /* Cannot do read-modify-write on 5401 */
2174                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2175         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2176                 /* Set bit 14 with read-modify-write to preserve other bits */
2177                 err = tg3_phy_auxctl_read(tp,
2178                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2179                 if (!err)
2180                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2182         }
2183
2184         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2185          * jumbo frames transmission.
2186          */
2187         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2188                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2189                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2190                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2191         }
2192
2193         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2194                 /* adjust output voltage */
2195                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2196         }
2197
2198         tg3_phy_toggle_automdix(tp, 1);
2199         tg3_phy_set_wirespeed(tp);
2200         return 0;
2201 }
2202
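/* Multi-function power-source coordination: each PCI function
 * publishes a 4-bit status nibble (driver present / needs Vaux) in a
 * shared register -- TG3_APE_GPIO_MSG on the 5717/5719, otherwise
 * TG3_CPMU_DRV_STATUS.  A function's nibble sits at shift
 * 4 * tp->pci_fn, so e.g. function 2 owns bits 8-11 of the message
 * field.
 */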
2203 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2204 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2205 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2206                                           TG3_GPIO_MSG_NEED_VAUX)
2207 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2208         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2209          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2210          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2211          (TG3_GPIO_MSG_DRVR_PRES << 12))
2212
2213 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2214         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2215          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2216          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2217          (TG3_GPIO_MSG_NEED_VAUX << 12))
2218
2219 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2220 {
2221         u32 status, shift;
2222
2223         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2224             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2225                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2226         else
2227                 status = tr32(TG3_CPMU_DRV_STATUS);
2228
2229         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2230         status &= ~(TG3_GPIO_MSG_MASK << shift);
2231         status |= (newstat << shift);
2232
2233         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2234             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2235                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2236         else
2237                 tw32(TG3_CPMU_DRV_STATUS, status);
2238
2239         return status >> TG3_APE_GPIO_MSG_SHIFT;
2240 }
2241
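/* Switch the NIC back to Vmain (the main power source).  On the
 * 5717/5719/5720 the power-switch GPIOs are shared between
 * functions, so the change is serialized with the APE GPIO lock and
 * announced through the status nibble above before the GRC
 * local-control write.
 */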
2242 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2243 {
2244         if (!tg3_flag(tp, IS_NIC))
2245                 return 0;
2246
2247         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2249             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2250                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2251                         return -EIO;
2252
2253                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2254
2255                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2256                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2257
2258                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2259         } else {
2260                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2261                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2262         }
2263
2264         return 0;
2265 }
2266
2267 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2268 {
2269         u32 grc_local_ctrl;
2270
2271         if (!tg3_flag(tp, IS_NIC) ||
2272             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2273             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2274                 return;
2275
2276         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2277
2278         tw32_wait_f(GRC_LOCAL_CTRL,
2279                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2281
2282         tw32_wait_f(GRC_LOCAL_CTRL,
2283                     grc_local_ctrl,
2284                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2285
2286         tw32_wait_f(GRC_LOCAL_CTRL,
2287                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2288                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2289 }
2290
2291 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2292 {
2293         if (!tg3_flag(tp, IS_NIC))
2294                 return;
2295
2296         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2297             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2298                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2299                             (GRC_LCLCTRL_GPIO_OE0 |
2300                              GRC_LCLCTRL_GPIO_OE1 |
2301                              GRC_LCLCTRL_GPIO_OE2 |
2302                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2303                              GRC_LCLCTRL_GPIO_OUTPUT1),
2304                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2305         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2306                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2307                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2308                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2309                                      GRC_LCLCTRL_GPIO_OE1 |
2310                                      GRC_LCLCTRL_GPIO_OE2 |
2311                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2312                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2313                                      tp->grc_local_ctrl;
2314                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2315                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2316
2317                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2318                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2319                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2320
2321                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2322                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2323                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2324         } else {
2325                 u32 no_gpio2;
2326                 u32 grc_local_ctrl = 0;
2327
2328                 /* Workaround to prevent overdrawing current. */
2329                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2330                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2331                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2332                                     grc_local_ctrl,
2333                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2334                 }
2335
2336                 /* On 5753 and variants, GPIO2 cannot be used. */
2337                 no_gpio2 = tp->nic_sram_data_cfg &
2338                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2339
2340                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2341                                   GRC_LCLCTRL_GPIO_OE1 |
2342                                   GRC_LCLCTRL_GPIO_OE2 |
2343                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2344                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2345                 if (no_gpio2) {
2346                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2347                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2348                 }
2349                 tw32_wait_f(GRC_LOCAL_CTRL,
2350                             tp->grc_local_ctrl | grc_local_ctrl,
2351                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2352
2353                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2354
2355                 tw32_wait_f(GRC_LOCAL_CTRL,
2356                             tp->grc_local_ctrl | grc_local_ctrl,
2357                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2358
2359                 if (!no_gpio2) {
2360                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2361                         tw32_wait_f(GRC_LOCAL_CTRL,
2362                                     tp->grc_local_ctrl | grc_local_ctrl,
2363                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2364                 }
2365         }
2366 }
2367
2368 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2369 {
2370         u32 msg = 0;
2371
2372         /* Serialize power state transitions */
2373         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2374                 return;
2375
2376         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2377                 msg = TG3_GPIO_MSG_NEED_VAUX;
2378
2379         msg = tg3_set_function_status(tp, msg);
2380
2381         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2382                 goto done;
2383
2384         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2385                 tg3_pwrsrc_switch_to_vaux(tp);
2386         else
2387                 tg3_pwrsrc_die_with_vmain(tp);
2388
2389 done:
2390         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2391 }
2392
2393 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2394 {
2395         bool need_vaux = false;
2396
2397         /* The GPIOs do something completely different on 57765. */
2398         if (!tg3_flag(tp, IS_NIC) ||
2399             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2400                 return;
2401
2402         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2403             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2404             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2405                 tg3_frob_aux_power_5717(tp, include_wol ?
2406                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2407                 return;
2408         }
2409
2410         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2411                 struct net_device *dev_peer;
2412
2413                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2414
2415                 /* remove_one() may have been run on the peer. */
2416                 if (dev_peer) {
2417                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2418
2419                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2420                                 return;
2421
2422                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2423                             tg3_flag(tp_peer, ENABLE_ASF))
2424                                 need_vaux = true;
2425                 }
2426         }
2427
2428         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2429             tg3_flag(tp, ENABLE_ASF))
2430                 need_vaux = true;
2431
2432         if (need_vaux)
2433                 tg3_pwrsrc_switch_to_vaux(tp);
2434         else
2435                 tg3_pwrsrc_die_with_vmain(tp);
2436 }
2437
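/* 5700-family link-polarity quirk: in PHY_2 LED mode the polarity
 * bit is always wanted; with a BCM5411 PHY it is wanted at 100/1000
 * only, and with other PHYs at 10Mb only.
 */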
2438 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2439 {
2440         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2441                 return 1;
2442         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2443                 if (speed != SPEED_10)
2444                         return 1;
2445         } else if (speed == SPEED_10)
2446                 return 1;
2447
2448         return 0;
2449 }
2450
2451 static int tg3_setup_phy(struct tg3 *, int);
2452
2453 #define RESET_KIND_SHUTDOWN     0
2454 #define RESET_KIND_INIT         1
2455 #define RESET_KIND_SUSPEND      2
2456
2457 static void tg3_write_sig_post_reset(struct tg3 *, int);
2458 static int tg3_halt_cpu(struct tg3 *, u32);
2459
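/* Power the PHY down as far as the chip allows.  SerDes devices are
 * parked through SG_DIG_CTRL instead, FET PHYs through their shadow
 * auxiliary-mode register, and a few families are excluded entirely
 * because powering the PHY down trips hardware bugs there.
 */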
2460 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2461 {
2462         u32 val;
2463
2464         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2465                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2466                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2467                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2468
2469                         sg_dig_ctrl |=
2470                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2471                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2472                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2473                 }
2474                 return;
2475         }
2476
2477         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2478                 tg3_bmcr_reset(tp);
2479                 val = tr32(GRC_MISC_CFG);
2480                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2481                 udelay(40);
2482                 return;
2483         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2484                 u32 phytest;
2485                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2486                         u32 phy;
2487
2488                         tg3_writephy(tp, MII_ADVERTISE, 0);
2489                         tg3_writephy(tp, MII_BMCR,
2490                                      BMCR_ANENABLE | BMCR_ANRESTART);
2491
2492                         tg3_writephy(tp, MII_TG3_FET_TEST,
2493                                      phytest | MII_TG3_FET_SHADOW_EN);
2494                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2495                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2496                                 tg3_writephy(tp,
2497                                              MII_TG3_FET_SHDW_AUXMODE4,
2498                                              phy);
2499                         }
2500                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2501                 }
2502                 return;
2503         } else if (do_low_power) {
2504                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2505                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2506
2507                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2508                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2509                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2510                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2511         }
2512
2513         /* The PHY should not be powered down on some chips because
2514          * of bugs.
2515          */
2516         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2517             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2518             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2519              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2520                 return;
2521
2522         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2523             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2524                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2525                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2526                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2527                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2528         }
2529
2530         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2531 }
2532
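/* NVRAM software arbitration: the SWARB register implements a
 * request/grant handshake between the host and the on-chip agents.
 * tg3_nvram_lock() requests SWARB_REQ_SET1 and polls for SWARB_GNT1
 * for up to 8000 * 20us = 160ms before giving up; nvram_lock_cnt
 * makes the lock recursive within the driver.
 */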
2533 /* tp->lock is held. */
2534 static int tg3_nvram_lock(struct tg3 *tp)
2535 {
2536         if (tg3_flag(tp, NVRAM)) {
2537                 int i;
2538
2539                 if (tp->nvram_lock_cnt == 0) {
2540                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2541                         for (i = 0; i < 8000; i++) {
2542                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2543                                         break;
2544                                 udelay(20);
2545                         }
2546                         if (i == 8000) {
2547                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2548                                 return -ENODEV;
2549                         }
2550                 }
2551                 tp->nvram_lock_cnt++;
2552         }
2553         return 0;
2554 }
2555
2556 /* tp->lock is held. */
2557 static void tg3_nvram_unlock(struct tg3 *tp)
2558 {
2559         if (tg3_flag(tp, NVRAM)) {
2560                 if (tp->nvram_lock_cnt > 0)
2561                         tp->nvram_lock_cnt--;
2562                 if (tp->nvram_lock_cnt == 0)
2563                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2564         }
2565 }
2566
2567 /* tp->lock is held. */
2568 static void tg3_enable_nvram_access(struct tg3 *tp)
2569 {
2570         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2571                 u32 nvaccess = tr32(NVRAM_ACCESS);
2572
2573                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2574         }
2575 }
2576
2577 /* tp->lock is held. */
2578 static void tg3_disable_nvram_access(struct tg3 *tp)
2579 {
2580         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2581                 u32 nvaccess = tr32(NVRAM_ACCESS);
2582
2583                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2584         }
2585 }
2586
2587 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2588                                         u32 offset, u32 *val)
2589 {
2590         u32 tmp;
2591         int i;
2592
2593         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2594                 return -EINVAL;
2595
2596         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2597                                         EEPROM_ADDR_DEVID_MASK |
2598                                         EEPROM_ADDR_READ);
2599         tw32(GRC_EEPROM_ADDR,
2600              tmp |
2601              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2602              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2603               EEPROM_ADDR_ADDR_MASK) |
2604              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2605
2606         for (i = 0; i < 1000; i++) {
2607                 tmp = tr32(GRC_EEPROM_ADDR);
2608
2609                 if (tmp & EEPROM_ADDR_COMPLETE)
2610                         break;
2611                 msleep(1);
2612         }
2613         if (!(tmp & EEPROM_ADDR_COMPLETE))
2614                 return -EBUSY;
2615
2616         tmp = tr32(GRC_EEPROM_DATA);
2617
2618         /*
2619          * The data will always be opposite the native endian
2620          * format.  Perform a blind byteswap to compensate.
2621          */
2622         *val = swab32(tmp);
2623
2624         return 0;
2625 }
2626
2627 #define NVRAM_CMD_TIMEOUT 10000
2628
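/* Kick an NVRAM command and poll for completion.  Worst case this
 * busy-waits NVRAM_CMD_TIMEOUT * 10us = 100ms before returning
 * -EBUSY.
 */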
2629 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2630 {
2631         int i;
2632
2633         tw32(NVRAM_CMD, nvram_cmd);
2634         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2635                 udelay(10);
2636                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2637                         udelay(10);
2638                         break;
2639                 }
2640         }
2641
2642         if (i == NVRAM_CMD_TIMEOUT)
2643                 return -EBUSY;
2644
2645         return 0;
2646 }
2647
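/* Atmel AT45DB0x1B-style flashes are page- rather than byte-
 * addressed: the page index is carried in the high bits (from
 * ATMEL_AT45DB0X1B_PAGE_POS up) with the byte offset within the page
 * below it.  Assuming the 264-byte pages these parts use, a linear
 * offset of 1000 maps to page 3 (1000 / 264), byte 208 (1000 % 264).
 * tg3_nvram_logical_addr() is the inverse transform.
 */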
2648 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2649 {
2650         if (tg3_flag(tp, NVRAM) &&
2651             tg3_flag(tp, NVRAM_BUFFERED) &&
2652             tg3_flag(tp, FLASH) &&
2653             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2654             (tp->nvram_jedecnum == JEDEC_ATMEL))
2655
2656                 addr = ((addr / tp->nvram_pagesize) <<
2657                         ATMEL_AT45DB0X1B_PAGE_POS) +
2658                        (addr % tp->nvram_pagesize);
2659
2660         return addr;
2661 }
2662
2663 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2664 {
2665         if (tg3_flag(tp, NVRAM) &&
2666             tg3_flag(tp, NVRAM_BUFFERED) &&
2667             tg3_flag(tp, FLASH) &&
2668             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2669             (tp->nvram_jedecnum == JEDEC_ATMEL))
2670
2671                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2672                         tp->nvram_pagesize) +
2673                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2674
2675         return addr;
2676 }
2677
2678 /* NOTE: Data read in from NVRAM is byteswapped according to
2679  * the byteswapping settings for all other register accesses.
2680  * tg3 devices are BE devices, so on a BE machine, the data
2681  * returned will be exactly as it is seen in NVRAM.  On a LE
2682  * machine, the 32-bit value will be byteswapped.
2683  */
2684 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2685 {
2686         int ret;
2687
2688         if (!tg3_flag(tp, NVRAM))
2689                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2690
2691         offset = tg3_nvram_phys_addr(tp, offset);
2692
2693         if (offset > NVRAM_ADDR_MSK)
2694                 return -EINVAL;
2695
2696         ret = tg3_nvram_lock(tp);
2697         if (ret)
2698                 return ret;
2699
2700         tg3_enable_nvram_access(tp);
2701
2702         tw32(NVRAM_ADDR, offset);
2703         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2704                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2705
2706         if (ret == 0)
2707                 *val = tr32(NVRAM_RDDATA);
2708
2709         tg3_disable_nvram_access(tp);
2710
2711         tg3_nvram_unlock(tp);
2712
2713         return ret;
2714 }
2715
2716 /* Ensures NVRAM data is in bytestream format. */
2717 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2718 {
2719         u32 v;
2720         int res = tg3_nvram_read(tp, offset, &v);
2721         if (!res)
2722                 *val = cpu_to_be32(v);
2723         return res;
2724 }
2725
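/* Program the station address.  The six address bytes are split
 * across a HIGH register (bytes 0-1) and a LOW register (bytes 2-5)
 * and mirrored into all four MAC_ADDR slots (skip_mac_1 leaves slot
 * 1 alone, e.g. for firmware use).  The byte sum also seeds the
 * transmit backoff generator so that stations with different
 * addresses pick different backoff slots.
 */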
2726 /* tp->lock is held. */
2727 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2728 {
2729         u32 addr_high, addr_low;
2730         int i;
2731
2732         addr_high = ((tp->dev->dev_addr[0] << 8) |
2733                      tp->dev->dev_addr[1]);
2734         addr_low = ((tp->dev->dev_addr[2] << 24) |
2735                     (tp->dev->dev_addr[3] << 16) |
2736                     (tp->dev->dev_addr[4] <<  8) |
2737                     (tp->dev->dev_addr[5] <<  0));
2738         for (i = 0; i < 4; i++) {
2739                 if (i == 1 && skip_mac_1)
2740                         continue;
2741                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2742                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2743         }
2744
2745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2746             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2747                 for (i = 0; i < 12; i++) {
2748                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2749                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2750                 }
2751         }
2752
2753         addr_high = (tp->dev->dev_addr[0] +
2754                      tp->dev->dev_addr[1] +
2755                      tp->dev->dev_addr[2] +
2756                      tp->dev->dev_addr[3] +
2757                      tp->dev->dev_addr[4] +
2758                      tp->dev->dev_addr[5]) &
2759                 TX_BACKOFF_SEED_MASK;
2760         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2761 }
2762
2763 static void tg3_enable_register_access(struct tg3 *tp)
2764 {
2765         /*
2766          * Make sure register accesses (indirect or otherwise) will function
2767          * correctly.
2768          */
2769         pci_write_config_dword(tp->pdev,
2770                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2771 }
2772
2773 static int tg3_power_up(struct tg3 *tp)
2774 {
2775         int err;
2776
2777         tg3_enable_register_access(tp);
2778
2779         err = pci_set_power_state(tp->pdev, PCI_D0);
2780         if (!err) {
2781                 /* Switch out of Vaux if it is a NIC */
2782                 tg3_pwrsrc_switch_to_vmain(tp);
2783         } else {
2784                 netdev_err(tp->dev, "Transition to D0 failed\n");
2785         }
2786
2787         return err;
2788 }
2789
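/* Prepare the chip for a low-power state: mask PCI interrupts,
 * save the current link settings, downshift the PHY (10/half unless
 * WoL needs 100Mb), arm the WoL mailbox and magic-packet machinery,
 * and trim whichever clocks may be stopped.  Exactly which steps run
 * depends on the chip family and on whether phylib drives the PHY
 * (USE_PHYLIB).
 */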
2790 static int tg3_power_down_prepare(struct tg3 *tp)
2791 {
2792         u32 misc_host_ctrl;
2793         bool device_should_wake, do_low_power;
2794
2795         tg3_enable_register_access(tp);
2796
2797         /* Restore the CLKREQ setting. */
2798         if (tg3_flag(tp, CLKREQ_BUG)) {
2799                 u16 lnkctl;
2800
2801                 pci_read_config_word(tp->pdev,
2802                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2803                                      &lnkctl);
2804                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2805                 pci_write_config_word(tp->pdev,
2806                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2807                                       lnkctl);
2808         }
2809
2810         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2811         tw32(TG3PCI_MISC_HOST_CTRL,
2812              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2813
2814         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2815                              tg3_flag(tp, WOL_ENABLE);
2816
2817         if (tg3_flag(tp, USE_PHYLIB)) {
2818                 do_low_power = false;
2819                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2820                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2821                         struct phy_device *phydev;
2822                         u32 phyid, advertising;
2823
2824                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2825
2826                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2827
2828                         tp->link_config.orig_speed = phydev->speed;
2829                         tp->link_config.orig_duplex = phydev->duplex;
2830                         tp->link_config.orig_autoneg = phydev->autoneg;
2831                         tp->link_config.orig_advertising = phydev->advertising;
2832
2833                         advertising = ADVERTISED_TP |
2834                                       ADVERTISED_Pause |
2835                                       ADVERTISED_Autoneg |
2836                                       ADVERTISED_10baseT_Half;
2837
2838                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2839                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2840                                         advertising |=
2841                                                 ADVERTISED_100baseT_Half |
2842                                                 ADVERTISED_100baseT_Full |
2843                                                 ADVERTISED_10baseT_Full;
2844                                 else
2845                                         advertising |= ADVERTISED_10baseT_Full;
2846                         }
2847
2848                         phydev->advertising = advertising;
2849
2850                         phy_start_aneg(phydev);
2851
2852                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2853                         if (phyid != PHY_ID_BCMAC131) {
2854                                 phyid &= PHY_BCM_OUI_MASK;
2855                                 if (phyid == PHY_BCM_OUI_1 ||
2856                                     phyid == PHY_BCM_OUI_2 ||
2857                                     phyid == PHY_BCM_OUI_3)
2858                                         do_low_power = true;
2859                         }
2860                 }
2861         } else {
2862                 do_low_power = true;
2863
2864                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2865                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2866                         tp->link_config.orig_speed = tp->link_config.speed;
2867                         tp->link_config.orig_duplex = tp->link_config.duplex;
2868                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2869                 }
2870
2871                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2872                         tp->link_config.speed = SPEED_10;
2873                         tp->link_config.duplex = DUPLEX_HALF;
2874                         tp->link_config.autoneg = AUTONEG_ENABLE;
2875                         tg3_setup_phy(tp, 0);
2876                 }
2877         }
2878
2879         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2880                 u32 val;
2881
2882                 val = tr32(GRC_VCPU_EXT_CTRL);
2883                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2884         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2885                 int i;
2886                 u32 val;
2887
2888                 for (i = 0; i < 200; i++) {
2889                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2890                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2891                                 break;
2892                         msleep(1);
2893                 }
2894         }
2895         if (tg3_flag(tp, WOL_CAP))
2896                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2897                                                      WOL_DRV_STATE_SHUTDOWN |
2898                                                      WOL_DRV_WOL |
2899                                                      WOL_SET_MAGIC_PKT);
2900
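        /* Something can still wake us: keep the MAC alive in a mode the
         * wake-up logic can use, i.e. a slow MII/GMII (or TBI) port with
         * magic packet detection and the receiver enabled.
         */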
2901         if (device_should_wake) {
2902                 u32 mac_mode;
2903
2904                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2905                         if (do_low_power &&
2906                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2907                                 tg3_phy_auxctl_write(tp,
2908                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2909                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2910                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2911                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2912                                 udelay(40);
2913                         }
2914
2915                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2916                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2917                         else
2918                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2919
2920                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2921                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2922                             ASIC_REV_5700) {
2923                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2924                                              SPEED_100 : SPEED_10;
2925                                 if (tg3_5700_link_polarity(tp, speed))
2926                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2927                                 else
2928                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2929                         }
2930                 } else {
2931                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2932                 }
2933
2934                 if (!tg3_flag(tp, 5750_PLUS))
2935                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2936
2937                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2938                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2939                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2940                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2941
2942                 if (tg3_flag(tp, ENABLE_APE))
2943                         mac_mode |= MAC_MODE_APE_TX_EN |
2944                                     MAC_MODE_APE_RX_EN |
2945                                     MAC_MODE_TDE_ENABLE;
2946
2947                 tw32_f(MAC_MODE, mac_mode);
2948                 udelay(100);
2949
2950                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2951                 udelay(10);
2952         }
2953
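        /* Throttle the core clocks for the time we spend powered down.
         * Which clock control bits apply depends on the chip generation;
         * 5780-class, CPMU-equipped and 5906 parts manage this themselves.
         */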
2954         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2955             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2956              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2957                 u32 base_val;
2958
2959                 base_val = tp->pci_clock_ctrl;
2960                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2961                              CLOCK_CTRL_TXCLK_DISABLE);
2962
2963                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2964                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2965         } else if (tg3_flag(tp, 5780_CLASS) ||
2966                    tg3_flag(tp, CPMU_PRESENT) ||
2967                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2968                 /* do nothing */
2969         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2970                 u32 newbits1, newbits2;
2971
2972                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2973                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2974                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2975                                     CLOCK_CTRL_TXCLK_DISABLE |
2976                                     CLOCK_CTRL_ALTCLK);
2977                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2978                 } else if (tg3_flag(tp, 5705_PLUS)) {
2979                         newbits1 = CLOCK_CTRL_625_CORE;
2980                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2981                 } else {
2982                         newbits1 = CLOCK_CTRL_ALTCLK;
2983                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2984                 }
2985
2986                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2987                             40);
2988
2989                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2990                             40);
2991
2992                 if (!tg3_flag(tp, 5705_PLUS)) {
2993                         u32 newbits3;
2994
2995                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2996                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2997                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2998                                             CLOCK_CTRL_TXCLK_DISABLE |
2999                                             CLOCK_CTRL_44MHZ_CORE);
3000                         } else {
3001                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3002                         }
3003
3004                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3005                                     tp->pci_clock_ctrl | newbits3, 40);
3006                 }
3007         }
3008
3009         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3010                 tg3_power_down_phy(tp, do_low_power);
3011
3012         tg3_frob_aux_power(tp, true);
3013
3014         /* Workaround for unstable PLL clock */
3015         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3016             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3017                 u32 val = tr32(0x7d00);
3018
3019                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3020                 tw32(0x7d00, val);
3021                 if (!tg3_flag(tp, ENABLE_ASF)) {
3022                         int err;
3023
3024                         err = tg3_nvram_lock(tp);
3025                         tg3_halt_cpu(tp, RX_CPU_BASE);
3026                         if (!err)
3027                                 tg3_nvram_unlock(tp);
3028                 }
3029         }
3030
3031         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3032
3033         return 0;
3034 }
3035
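/* Final power-down entry point: run the shutdown preparation above,
 * then arm PCI wakeup per the WOL_ENABLE flag and drop to D3hot.
 */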
3036 static void tg3_power_down(struct tg3 *tp)
3037 {
3038         tg3_power_down_prepare(tp);
3039
3040         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3041         pci_set_power_state(tp->pdev, PCI_D3hot);
3042 }
3043
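/* Decode the MII_TG3_AUX_STAT speed/duplex field into SPEED_* and
 * DUPLEX_* values.  FET PHYs encode the result differently, so fall
 * back to the separate 100/FULL status bits for those.
 */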
3044 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3045 {
3046         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3047         case MII_TG3_AUX_STAT_10HALF:
3048                 *speed = SPEED_10;
3049                 *duplex = DUPLEX_HALF;
3050                 break;
3051
3052         case MII_TG3_AUX_STAT_10FULL:
3053                 *speed = SPEED_10;
3054                 *duplex = DUPLEX_FULL;
3055                 break;
3056
3057         case MII_TG3_AUX_STAT_100HALF:
3058                 *speed = SPEED_100;
3059                 *duplex = DUPLEX_HALF;
3060                 break;
3061
3062         case MII_TG3_AUX_STAT_100FULL:
3063                 *speed = SPEED_100;
3064                 *duplex = DUPLEX_FULL;
3065                 break;
3066
3067         case MII_TG3_AUX_STAT_1000HALF:
3068                 *speed = SPEED_1000;
3069                 *duplex = DUPLEX_HALF;
3070                 break;
3071
3072         case MII_TG3_AUX_STAT_1000FULL:
3073                 *speed = SPEED_1000;
3074                 *duplex = DUPLEX_FULL;
3075                 break;
3076
3077         default:
3078                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3079                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3080                                  SPEED_10;
3081                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3082                                   DUPLEX_HALF;
3083                         break;
3084                 }
3085                 *speed = SPEED_INVALID;
3086                 *duplex = DUPLEX_INVALID;
3087                 break;
3088         }
3089 }
3090
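/* Program the PHY's advertisement registers from an ethtool-style
 * ADVERTISED_* mask plus the requested flow control: MII_ADVERTISE for
 * 10/100, MII_CTRL1000 for gigabit (5701 A0/B0 parts are forced to be
 * master as a workaround), and the clause 45 EEE advertisement
 * register when the PHY is EEE-capable.
 */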
3091 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3092 {
3093         int err = 0;
3094         u32 val, new_adv;
3095
3096         new_adv = ADVERTISE_CSMA;
3097         if (advertise & ADVERTISED_10baseT_Half)
3098                 new_adv |= ADVERTISE_10HALF;
3099         if (advertise & ADVERTISED_10baseT_Full)
3100                 new_adv |= ADVERTISE_10FULL;
3101         if (advertise & ADVERTISED_100baseT_Half)
3102                 new_adv |= ADVERTISE_100HALF;
3103         if (advertise & ADVERTISED_100baseT_Full)
3104                 new_adv |= ADVERTISE_100FULL;
3105
3106         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3107
3108         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3109         if (err)
3110                 goto done;
3111
3112         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3113                 goto done;
3114
3115         new_adv = 0;
3116         if (advertise & ADVERTISED_1000baseT_Half)
3117                 new_adv |= ADVERTISE_1000HALF;
3118         if (advertise & ADVERTISED_1000baseT_Full)
3119                 new_adv |= ADVERTISE_1000FULL;
3120
3121         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3122             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3123                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3124
3125         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3126         if (err)
3127                 goto done;
3128
3129         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3130                 goto done;
3131
3132         tw32(TG3_CPMU_EEE_MODE,
3133              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3134
3135         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3136         if (!err) {
3137                 u32 err2;
3138
3139                 val = 0;
3140                 /* Advertise 100-BaseTX EEE ability */
3141                 if (advertise & ADVERTISED_100baseT_Full)
3142                         val |= MDIO_AN_EEE_ADV_100TX;
3143                 /* Advertise 1000-BaseT EEE ability */
3144                 if (advertise & ADVERTISED_1000baseT_Full)
3145                         val |= MDIO_AN_EEE_ADV_1000T;
3146                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3147                 if (err)
3148                         val = 0;
3149
3150                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3151                 case ASIC_REV_5717:
3152                 case ASIC_REV_57765:
3153                 case ASIC_REV_5719:
3154                         /* If we advertised any EEE abilities above... */
3155                         if (val)
3156                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3157                                       MII_TG3_DSP_TAP26_RMRXSTO |
3158                                       MII_TG3_DSP_TAP26_OPCSINPT;
3159                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3160                         /* Fall through */
3161                 case ASIC_REV_5720:
3162                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3163                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3164                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3165                 }
3166
3167                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3168                 if (!err)
3169                         err = err2;
3170         }
3171
3172 done:
3173         return err;
3174 }
3175
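/* Start copper link negotiation.  Three cases: a low-power (WoL)
 * configuration advertises only the speeds the wake-up logic can use,
 * an unforced configuration advertises everything requested, and a
 * forced speed/duplex is programmed straight into BMCR after the old
 * link has been dropped.
 */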
3176 static void tg3_phy_copper_begin(struct tg3 *tp)
3177 {
3178         u32 new_adv;
3179         int i;
3180
3181         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3182                 new_adv = ADVERTISED_10baseT_Half |
3183                           ADVERTISED_10baseT_Full;
3184                 if (tg3_flag(tp, WOL_SPEED_100MB))
3185                         new_adv |= ADVERTISED_100baseT_Half |
3186                                    ADVERTISED_100baseT_Full;
3187
3188                 tg3_phy_autoneg_cfg(tp, new_adv,
3189                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3190         } else if (tp->link_config.speed == SPEED_INVALID) {
3191                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3192                         tp->link_config.advertising &=
3193                                 ~(ADVERTISED_1000baseT_Half |
3194                                   ADVERTISED_1000baseT_Full);
3195
3196                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3197                                     tp->link_config.flowctrl);
3198         } else {
3199                 /* Asking for a specific link mode. */
3200                 if (tp->link_config.speed == SPEED_1000) {
3201                         if (tp->link_config.duplex == DUPLEX_FULL)
3202                                 new_adv = ADVERTISED_1000baseT_Full;
3203                         else
3204                                 new_adv = ADVERTISED_1000baseT_Half;
3205                 } else if (tp->link_config.speed == SPEED_100) {
3206                         if (tp->link_config.duplex == DUPLEX_FULL)
3207                                 new_adv = ADVERTISED_100baseT_Full;
3208                         else
3209                                 new_adv = ADVERTISED_100baseT_Half;
3210                 } else {
3211                         if (tp->link_config.duplex == DUPLEX_FULL)
3212                                 new_adv = ADVERTISED_10baseT_Full;
3213                         else
3214                                 new_adv = ADVERTISED_10baseT_Half;
3215                 }
3216
3217                 tg3_phy_autoneg_cfg(tp, new_adv,
3218                                     tp->link_config.flowctrl);
3219         }
3220
3221         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3222             tp->link_config.speed != SPEED_INVALID) {
3223                 u32 bmcr, orig_bmcr;
3224
3225                 tp->link_config.active_speed = tp->link_config.speed;
3226                 tp->link_config.active_duplex = tp->link_config.duplex;
3227
3228                 bmcr = 0;
3229                 switch (tp->link_config.speed) {
3230                 default:
3231                 case SPEED_10:
3232                         break;
3233
3234                 case SPEED_100:
3235                         bmcr |= BMCR_SPEED100;
3236                         break;
3237
3238                 case SPEED_1000:
3239                         bmcr |= BMCR_SPEED1000;
3240                         break;
3241                 }
3242
3243                 if (tp->link_config.duplex == DUPLEX_FULL)
3244                         bmcr |= BMCR_FULLDPLX;
3245
3246                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3247                     (bmcr != orig_bmcr)) {
3248                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3249                         for (i = 0; i < 1500; i++) {
3250                                 u32 tmp;
3251
3252                                 udelay(10);
3253                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3254                                     tg3_readphy(tp, MII_BMSR, &tmp))
3255                                         continue;
3256                                 if (!(tmp & BMSR_LSTATUS)) {
3257                                         udelay(40);
3258                                         break;
3259                                 }
3260                         }
3261                         tg3_writephy(tp, MII_BMCR, bmcr);
3262                         udelay(40);
3263                 }
3264         } else {
3265                 tg3_writephy(tp, MII_BMCR,
3266                              BMCR_ANENABLE | BMCR_ANRESTART);
3267         }
3268 }
3269
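/* One-time DSP fixups for the BCM5401 PHY: disable tap power
 * management, set the extended packet length bit, and load a handful
 * of magic DSP coefficients.
 */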
3270 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3271 {
3272         int err;
3273
3274         /* Turn off tap power management and set the
3275          * extended packet length bit. */
3276         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3277
3278         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3279         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3280         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3281         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3282         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3283
3284         udelay(40);
3285
3286         return err;
3287 }
3288
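/* Return 1 only if the advertisement registers already contain every
 * mode requested in @mask; a failed register read or a missing mode
 * returns 0 so the caller falls through to renegotiation.
 */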
3289 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3290 {
3291         u32 adv_reg, all_mask = 0;
3292
3293         if (mask & ADVERTISED_10baseT_Half)
3294                 all_mask |= ADVERTISE_10HALF;
3295         if (mask & ADVERTISED_10baseT_Full)
3296                 all_mask |= ADVERTISE_10FULL;
3297         if (mask & ADVERTISED_100baseT_Half)
3298                 all_mask |= ADVERTISE_100HALF;
3299         if (mask & ADVERTISED_100baseT_Full)
3300                 all_mask |= ADVERTISE_100FULL;
3301
3302         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3303                 return 0;
3304
3305         if ((adv_reg & all_mask) != all_mask)
3306                 return 0;
3307         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3308                 u32 tg3_ctrl;
3309
3310                 all_mask = 0;
3311                 if (mask & ADVERTISED_1000baseT_Half)
3312                         all_mask |= ADVERTISE_1000HALF;
3313                 if (mask & ADVERTISED_1000baseT_Full)
3314                         all_mask |= ADVERTISE_1000FULL;
3315
3316                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3317                         return 0;
3318
3319                 if ((tg3_ctrl & all_mask) != all_mask)
3320                         return 0;
3321         }
3322         return 1;
3323 }
3324
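/* Verify that the advertised 1000T flow control matches what was
 * requested.  On a full-duplex link a mismatch fails the check; on
 * half duplex the advertisement register is quietly rewritten so the
 * next negotiation starts out right (see the comment below).
 */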
3325 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3326 {
3327         u32 curadv, reqadv;
3328
3329         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3330                 return 1;
3331
3332         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3333         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3334
3335         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3336                 if (curadv != reqadv)
3337                         return 0;
3338
3339                 if (tg3_flag(tp, PAUSE_AUTONEG))
3340                         tg3_readphy(tp, MII_LPA, rmtadv);
3341         } else {
3342                 /* Reprogram the advertisement register, even if it
3343                  * does not affect the current link.  If the link
3344                  * gets renegotiated in the future, we can save an
3345                  * additional renegotiation cycle by advertising
3346                  * it correctly in the first place.
3347                  */
3348                 if (curadv != reqadv) {
3349                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3350                                      ADVERTISE_PAUSE_ASYM);
3351                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3352                 }
3353         }
3354
3355         return 1;
3356 }
3357
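/* Bring up, or re-verify, the link on a copper PHY: clear stale MAC
 * events, apply chip-specific PHY workarounds, poll BMSR for link,
 * derive speed/duplex from AUX_STAT, then bring MAC_MODE and the
 * carrier state in line with the result.
 */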
3358 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3359 {
3360         int current_link_up;
3361         u32 bmsr, val;
3362         u32 lcl_adv, rmt_adv;
3363         u16 current_speed;
3364         u8 current_duplex;
3365         int i, err;
3366
3367         tw32(MAC_EVENT, 0);
3368
3369         tw32_f(MAC_STATUS,
3370              (MAC_STATUS_SYNC_CHANGED |
3371               MAC_STATUS_CFG_CHANGED |
3372               MAC_STATUS_MI_COMPLETION |
3373               MAC_STATUS_LNKSTATE_CHANGED));
3374         udelay(40);
3375
3376         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3377                 tw32_f(MAC_MI_MODE,
3378                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3379                 udelay(80);
3380         }
3381
3382         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3383
3384         /* Some third-party PHYs need to be reset on link going
3385          * down.
3386          */
3387         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3388              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3389              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3390             netif_carrier_ok(tp->dev)) {
3391                 tg3_readphy(tp, MII_BMSR, &bmsr);
3392                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3393                     !(bmsr & BMSR_LSTATUS))
3394                         force_reset = 1;
3395         }
3396         if (force_reset)
3397                 tg3_phy_reset(tp);
3398
3399         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3400                 tg3_readphy(tp, MII_BMSR, &bmsr);
3401                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3402                     !tg3_flag(tp, INIT_COMPLETE))
3403                         bmsr = 0;
3404
3405                 if (!(bmsr & BMSR_LSTATUS)) {
3406                         err = tg3_init_5401phy_dsp(tp);
3407                         if (err)
3408                                 return err;
3409
3410                         tg3_readphy(tp, MII_BMSR, &bmsr);
3411                         for (i = 0; i < 1000; i++) {
3412                                 udelay(10);
3413                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3414                                     (bmsr & BMSR_LSTATUS)) {
3415                                         udelay(40);
3416                                         break;
3417                                 }
3418                         }
3419
3420                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3421                             TG3_PHY_REV_BCM5401_B0 &&
3422                             !(bmsr & BMSR_LSTATUS) &&
3423                             tp->link_config.active_speed == SPEED_1000) {
3424                                 err = tg3_phy_reset(tp);
3425                                 if (!err)
3426                                         err = tg3_init_5401phy_dsp(tp);
3427                                 if (err)
3428                                         return err;
3429                         }
3430                 }
3431         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3432                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3433                 /* 5701 {A0,B0} CRC bug workaround */
3434                 tg3_writephy(tp, 0x15, 0x0a75);
3435                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3436                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3437                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3438         }
3439
3440         /* Clear pending interrupts... */
3441         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3442         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3443
3444         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3445                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3446         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3447                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3448
3449         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3452                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3453                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3454                 else
3455                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3456         }
3457
3458         current_link_up = 0;
3459         current_speed = SPEED_INVALID;
3460         current_duplex = DUPLEX_INVALID;
3461
3462         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3463                 err = tg3_phy_auxctl_read(tp,
3464                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3465                                           &val);
3466                 if (!err && !(val & (1 << 10))) {
3467                         tg3_phy_auxctl_write(tp,
3468                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3469                                              val | (1 << 10));
3470                         goto relink;
3471                 }
3472         }
3473
3474         bmsr = 0;
3475         for (i = 0; i < 100; i++) {
3476                 tg3_readphy(tp, MII_BMSR, &bmsr);
3477                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3478                     (bmsr & BMSR_LSTATUS))
3479                         break;
3480                 udelay(40);
3481         }
3482
3483         if (bmsr & BMSR_LSTATUS) {
3484                 u32 aux_stat, bmcr;
3485
3486                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3487                 for (i = 0; i < 2000; i++) {
3488                         udelay(10);
3489                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3490                             aux_stat)
3491                                 break;
3492                 }
3493
3494                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3495                                              &current_speed,
3496                                              &current_duplex);
3497
3498                 bmcr = 0;
3499                 for (i = 0; i < 200; i++) {
3500                         tg3_readphy(tp, MII_BMCR, &bmcr);
3501                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3502                                 continue;
3503                         if (bmcr && bmcr != 0x7fff)
3504                                 break;
3505                         udelay(10);
3506                 }
3507
3508                 lcl_adv = 0;
3509                 rmt_adv = 0;
3510
3511                 tp->link_config.active_speed = current_speed;
3512                 tp->link_config.active_duplex = current_duplex;
3513
3514                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3515                         if ((bmcr & BMCR_ANENABLE) &&
3516                             tg3_copper_is_advertising_all(tp,
3517                                                 tp->link_config.advertising)) {
3518                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3519                                                                   &rmt_adv))
3520                                         current_link_up = 1;
3521                         }
3522                 } else {
3523                         if (!(bmcr & BMCR_ANENABLE) &&
3524                             tp->link_config.speed == current_speed &&
3525                             tp->link_config.duplex == current_duplex &&
3526                             tp->link_config.flowctrl ==
3527                             tp->link_config.active_flowctrl) {
3528                                 current_link_up = 1;
3529                         }
3530                 }
3531
3532                 if (current_link_up == 1 &&
3533                     tp->link_config.active_duplex == DUPLEX_FULL)
3534                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3535         }
3536
3537 relink:
3538         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3539                 tg3_phy_copper_begin(tp);
3540
3541                 tg3_readphy(tp, MII_BMSR, &bmsr);
3542                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3543                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3544                         current_link_up = 1;
3545         }
3546
3547         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3548         if (current_link_up == 1) {
3549                 if (tp->link_config.active_speed == SPEED_100 ||
3550                     tp->link_config.active_speed == SPEED_10)
3551                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3552                 else
3553                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3554         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3555                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3556         else
3557                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3558
3559         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3560         if (tp->link_config.active_duplex == DUPLEX_HALF)
3561                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3562
3563         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3564                 if (current_link_up == 1 &&
3565                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3566                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3567                 else
3568                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3569         }
3570
3571         /* ??? Without this setting Netgear GA302T PHY does not
3572          * ??? send/receive packets...
3573          */
3574         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3575             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3576                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3577                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3578                 udelay(80);
3579         }
3580
3581         tw32_f(MAC_MODE, tp->mac_mode);
3582         udelay(40);
3583
3584         tg3_phy_eee_adjust(tp, current_link_up);
3585
3586         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3587                 /* Polled via timer. */
3588                 tw32_f(MAC_EVENT, 0);
3589         } else {
3590                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3591         }
3592         udelay(40);
3593
3594         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3595             current_link_up == 1 &&
3596             tp->link_config.active_speed == SPEED_1000 &&
3597             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3598                 udelay(120);
3599                 tw32_f(MAC_STATUS,
3600                      (MAC_STATUS_SYNC_CHANGED |
3601                       MAC_STATUS_CFG_CHANGED));
3602                 udelay(40);
3603                 tg3_write_mem(tp,
3604                               NIC_SRAM_FIRMWARE_MBOX,
3605                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3606         }
3607
3608         /* Prevent send BD corruption. */
3609         if (tg3_flag(tp, CLKREQ_BUG)) {
3610                 u16 oldlnkctl, newlnkctl;
3611
3612                 pci_read_config_word(tp->pdev,
3613                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3614                                      &oldlnkctl);
3615                 if (tp->link_config.active_speed == SPEED_100 ||
3616                     tp->link_config.active_speed == SPEED_10)
3617                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3618                 else
3619                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3620                 if (newlnkctl != oldlnkctl)
3621                         pci_write_config_word(tp->pdev,
3622                                               pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3623                                               newlnkctl);
3624         }
3625
3626         if (current_link_up != netif_carrier_ok(tp->dev)) {
3627                 if (current_link_up)
3628                         netif_carrier_on(tp->dev);
3629                 else
3630                         netif_carrier_off(tp->dev);
3631                 tg3_link_report(tp);
3632         }
3633
3634         return 0;
3635 }
3636
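/* State for the software autonegotiation engine used on fiber parts
 * without hardware autoneg support.  The states and MR_* flags mirror
 * the 1000BASE-X (IEEE 802.3 clause 37) management variables.
 */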
3637 struct tg3_fiber_aneginfo {
3638         int state;
3639 #define ANEG_STATE_UNKNOWN              0
3640 #define ANEG_STATE_AN_ENABLE            1
3641 #define ANEG_STATE_RESTART_INIT         2
3642 #define ANEG_STATE_RESTART              3
3643 #define ANEG_STATE_DISABLE_LINK_OK      4
3644 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3645 #define ANEG_STATE_ABILITY_DETECT       6
3646 #define ANEG_STATE_ACK_DETECT_INIT      7
3647 #define ANEG_STATE_ACK_DETECT           8
3648 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3649 #define ANEG_STATE_COMPLETE_ACK         10
3650 #define ANEG_STATE_IDLE_DETECT_INIT     11
3651 #define ANEG_STATE_IDLE_DETECT          12
3652 #define ANEG_STATE_LINK_OK              13
3653 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3654 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3655
3656         u32 flags;
3657 #define MR_AN_ENABLE            0x00000001
3658 #define MR_RESTART_AN           0x00000002
3659 #define MR_AN_COMPLETE          0x00000004
3660 #define MR_PAGE_RX              0x00000008
3661 #define MR_NP_LOADED            0x00000010
3662 #define MR_TOGGLE_TX            0x00000020
3663 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3664 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3665 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3666 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3667 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3668 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3669 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3670 #define MR_TOGGLE_RX            0x00002000
3671 #define MR_NP_RX                0x00004000
3672
3673 #define MR_LINK_OK              0x80000000
3674
3675         unsigned long link_time, cur_time;
3676
3677         u32 ability_match_cfg;
3678         int ability_match_count;
3679
3680         char ability_match, idle_match, ack_match;
3681
3682         u32 txconfig, rxconfig;
3683 #define ANEG_CFG_NP             0x00000080
3684 #define ANEG_CFG_ACK            0x00000040
3685 #define ANEG_CFG_RF2            0x00000020
3686 #define ANEG_CFG_RF1            0x00000010
3687 #define ANEG_CFG_PS2            0x00000001
3688 #define ANEG_CFG_PS1            0x00008000
3689 #define ANEG_CFG_HD             0x00004000
3690 #define ANEG_CFG_FD             0x00002000
3691 #define ANEG_CFG_INVAL          0x00001f06
3692
3693 };
3694 #define ANEG_OK         0
3695 #define ANEG_DONE       1
3696 #define ANEG_TIMER_ENAB 2
3697 #define ANEG_FAILED     -1
3698
3699 #define ANEG_STATE_SETTLE_TIME  10000
3700
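/* Run one tick of the software fiber autoneg state machine.  The
 * caller loops over this: ANEG_TIMER_ENAB means call again,
 * ANEG_DONE and ANEG_FAILED are terminal.
 */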
3701 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3702                                    struct tg3_fiber_aneginfo *ap)
3703 {
3704         u16 flowctrl;
3705         unsigned long delta;
3706         u32 rx_cfg_reg;
3707         int ret;
3708
3709         if (ap->state == ANEG_STATE_UNKNOWN) {
3710                 ap->rxconfig = 0;
3711                 ap->link_time = 0;
3712                 ap->cur_time = 0;
3713                 ap->ability_match_cfg = 0;
3714                 ap->ability_match_count = 0;
3715                 ap->ability_match = 0;
3716                 ap->idle_match = 0;
3717                 ap->ack_match = 0;
3718         }
3719         ap->cur_time++;
3720
3721         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3722                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3723
3724                 if (rx_cfg_reg != ap->ability_match_cfg) {
3725                         ap->ability_match_cfg = rx_cfg_reg;
3726                         ap->ability_match = 0;
3727                         ap->ability_match_count = 0;
3728                 } else {
3729                         if (++ap->ability_match_count > 1) {
3730                                 ap->ability_match = 1;
3731                                 ap->ability_match_cfg = rx_cfg_reg;
3732                         }
3733                 }
3734                 if (rx_cfg_reg & ANEG_CFG_ACK)
3735                         ap->ack_match = 1;
3736                 else
3737                         ap->ack_match = 0;
3738
3739                 ap->idle_match = 0;
3740         } else {
3741                 ap->idle_match = 1;
3742                 ap->ability_match_cfg = 0;
3743                 ap->ability_match_count = 0;
3744                 ap->ability_match = 0;
3745                 ap->ack_match = 0;
3746
3747                 rx_cfg_reg = 0;
3748         }
3749
3750         ap->rxconfig = rx_cfg_reg;
3751         ret = ANEG_OK;
3752
3753         switch (ap->state) {
3754         case ANEG_STATE_UNKNOWN:
3755                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3756                         ap->state = ANEG_STATE_AN_ENABLE;
3757
3758                 /* fallthru */
3759         case ANEG_STATE_AN_ENABLE:
3760                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3761                 if (ap->flags & MR_AN_ENABLE) {
3762                         ap->link_time = 0;
3763                         ap->cur_time = 0;
3764                         ap->ability_match_cfg = 0;
3765                         ap->ability_match_count = 0;
3766                         ap->ability_match = 0;
3767                         ap->idle_match = 0;
3768                         ap->ack_match = 0;
3769
3770                         ap->state = ANEG_STATE_RESTART_INIT;
3771                 } else {
3772                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3773                 }
3774                 break;
3775
3776         case ANEG_STATE_RESTART_INIT:
3777                 ap->link_time = ap->cur_time;
3778                 ap->flags &= ~(MR_NP_LOADED);
3779                 ap->txconfig = 0;
3780                 tw32(MAC_TX_AUTO_NEG, 0);
3781                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3782                 tw32_f(MAC_MODE, tp->mac_mode);
3783                 udelay(40);
3784
3785                 ret = ANEG_TIMER_ENAB;
3786                 ap->state = ANEG_STATE_RESTART;
3787
3788                 /* fallthru */
3789         case ANEG_STATE_RESTART:
3790                 delta = ap->cur_time - ap->link_time;
3791                 if (delta > ANEG_STATE_SETTLE_TIME)
3792                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3793                 else
3794                         ret = ANEG_TIMER_ENAB;
3795                 break;
3796
3797         case ANEG_STATE_DISABLE_LINK_OK:
3798                 ret = ANEG_DONE;
3799                 break;
3800
3801         case ANEG_STATE_ABILITY_DETECT_INIT:
3802                 ap->flags &= ~(MR_TOGGLE_TX);
3803                 ap->txconfig = ANEG_CFG_FD;
3804                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3805                 if (flowctrl & ADVERTISE_1000XPAUSE)
3806                         ap->txconfig |= ANEG_CFG_PS1;
3807                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3808                         ap->txconfig |= ANEG_CFG_PS2;
3809                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3810                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3811                 tw32_f(MAC_MODE, tp->mac_mode);
3812                 udelay(40);
3813
3814                 ap->state = ANEG_STATE_ABILITY_DETECT;
3815                 break;
3816
3817         case ANEG_STATE_ABILITY_DETECT:
3818                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3819                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3820                 break;
3821
3822         case ANEG_STATE_ACK_DETECT_INIT:
3823                 ap->txconfig |= ANEG_CFG_ACK;
3824                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3825                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3826                 tw32_f(MAC_MODE, tp->mac_mode);
3827                 udelay(40);
3828
3829                 ap->state = ANEG_STATE_ACK_DETECT;
3830
3831                 /* fallthru */
3832         case ANEG_STATE_ACK_DETECT:
3833                 if (ap->ack_match != 0) {
3834                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3835                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3836                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3837                         } else {
3838                                 ap->state = ANEG_STATE_AN_ENABLE;
3839                         }
3840                 } else if (ap->ability_match != 0 &&
3841                            ap->rxconfig == 0) {
3842                         ap->state = ANEG_STATE_AN_ENABLE;
3843                 }
3844                 break;
3845
3846         case ANEG_STATE_COMPLETE_ACK_INIT:
3847                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3848                         ret = ANEG_FAILED;
3849                         break;
3850                 }
3851                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3852                                MR_LP_ADV_HALF_DUPLEX |
3853                                MR_LP_ADV_SYM_PAUSE |
3854                                MR_LP_ADV_ASYM_PAUSE |
3855                                MR_LP_ADV_REMOTE_FAULT1 |
3856                                MR_LP_ADV_REMOTE_FAULT2 |
3857                                MR_LP_ADV_NEXT_PAGE |
3858                                MR_TOGGLE_RX |
3859                                MR_NP_RX);
3860                 if (ap->rxconfig & ANEG_CFG_FD)
3861                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3862                 if (ap->rxconfig & ANEG_CFG_HD)
3863                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3864                 if (ap->rxconfig & ANEG_CFG_PS1)
3865                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3866                 if (ap->rxconfig & ANEG_CFG_PS2)
3867                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3868                 if (ap->rxconfig & ANEG_CFG_RF1)
3869                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3870                 if (ap->rxconfig & ANEG_CFG_RF2)
3871                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3872                 if (ap->rxconfig & ANEG_CFG_NP)
3873                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3874
3875                 ap->link_time = ap->cur_time;
3876
3877                 ap->flags ^= (MR_TOGGLE_TX);
3878                 if (ap->rxconfig & 0x0008)
3879                         ap->flags |= MR_TOGGLE_RX;
3880                 if (ap->rxconfig & ANEG_CFG_NP)
3881                         ap->flags |= MR_NP_RX;
3882                 ap->flags |= MR_PAGE_RX;
3883
3884                 ap->state = ANEG_STATE_COMPLETE_ACK;
3885                 ret = ANEG_TIMER_ENAB;
3886                 break;
3887
3888         case ANEG_STATE_COMPLETE_ACK:
3889                 if (ap->ability_match != 0 &&
3890                     ap->rxconfig == 0) {
3891                         ap->state = ANEG_STATE_AN_ENABLE;
3892                         break;
3893                 }
3894                 delta = ap->cur_time - ap->link_time;
3895                 if (delta > ANEG_STATE_SETTLE_TIME) {
3896                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3897                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3898                         } else {
3899                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3900                                     !(ap->flags & MR_NP_RX)) {
3901                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3902                                 } else {
3903                                         ret = ANEG_FAILED;
3904                                 }
3905                         }
3906                 }
3907                 break;
3908
3909         case ANEG_STATE_IDLE_DETECT_INIT:
3910                 ap->link_time = ap->cur_time;
3911                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3912                 tw32_f(MAC_MODE, tp->mac_mode);
3913                 udelay(40);
3914
3915                 ap->state = ANEG_STATE_IDLE_DETECT;
3916                 ret = ANEG_TIMER_ENAB;
3917                 break;
3918
3919         case ANEG_STATE_IDLE_DETECT:
3920                 if (ap->ability_match != 0 &&
3921                     ap->rxconfig == 0) {
3922                         ap->state = ANEG_STATE_AN_ENABLE;
3923                         break;
3924                 }
3925                 delta = ap->cur_time - ap->link_time;
3926                 if (delta > ANEG_STATE_SETTLE_TIME) {
3927                         /* XXX another gem from the Broadcom driver :( */
3928                         ap->state = ANEG_STATE_LINK_OK;
3929                 }
3930                 break;
3931
3932         case ANEG_STATE_LINK_OK:
3933                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3934                 ret = ANEG_DONE;
3935                 break;
3936
3937         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3938                 /* ??? unimplemented */
3939                 break;
3940
3941         case ANEG_STATE_NEXT_PAGE_WAIT:
3942                 /* ??? unimplemented */
3943                 break;
3944
3945         default:
3946                 ret = ANEG_FAILED;
3947                 break;
3948         }
3949
3950         return ret;
3951 }
3952
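/* Drive the software autoneg state machine to completion, budgeting
 * roughly 195 ms in 1 us ticks.  Returns 1 when negotiation finished
 * (ANEG_DONE) with a completion, link-ok or LP-full-duplex flag set;
 * txflags/rxflags report what was sent and received.
 */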
3953 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3954 {
3955         int res = 0;
3956         struct tg3_fiber_aneginfo aninfo;
3957         int status = ANEG_FAILED;
3958         unsigned int tick;
3959         u32 tmp;
3960
3961         tw32_f(MAC_TX_AUTO_NEG, 0);
3962
3963         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3964         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3965         udelay(40);
3966
3967         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3968         udelay(40);
3969
3970         memset(&aninfo, 0, sizeof(aninfo));
3971         aninfo.flags |= MR_AN_ENABLE;
3972         aninfo.state = ANEG_STATE_UNKNOWN;
3973         aninfo.cur_time = 0;
3974         tick = 0;
3975         while (++tick < 195000) {
3976                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3977                 if (status == ANEG_DONE || status == ANEG_FAILED)
3978                         break;
3979
3980                 udelay(1);
3981         }
3982
3983         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3984         tw32_f(MAC_MODE, tp->mac_mode);
3985         udelay(40);
3986
3987         *txflags = aninfo.txconfig;
3988         *rxflags = aninfo.flags;
3989
3990         if (status == ANEG_DONE &&
3991             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3992                              MR_LP_ADV_FULL_DUPLEX)))
3993                 res = 1;
3994
3995         return res;
3996 }
3997
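/* Magic register sequence to bring the BCM8002 SerDes PHY up: set the
 * PLL lock range, soft-reset, cycle POR, and leave the channel
 * register deselected so the PHY ID remains readable.
 */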
3998 static void tg3_init_bcm8002(struct tg3 *tp)
3999 {
4000         u32 mac_status = tr32(MAC_STATUS);
4001         int i;
4002
4003         /* Reset when initializing for the first time or when we have a link. */
4004         if (tg3_flag(tp, INIT_COMPLETE) &&
4005             !(mac_status & MAC_STATUS_PCS_SYNCED))
4006                 return;
4007
4008         /* Set PLL lock range. */
4009         tg3_writephy(tp, 0x16, 0x8007);
4010
4011         /* SW reset */
4012         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4013
4014         /* Wait for reset to complete. */
4015         /* XXX schedule_timeout() ... */
4016         for (i = 0; i < 500; i++)
4017                 udelay(10);
4018
4019         /* Config mode; select PMA/Ch 1 regs. */
4020         tg3_writephy(tp, 0x10, 0x8411);
4021
4022         /* Enable auto-lock and comdet, select txclk for tx. */
4023         tg3_writephy(tp, 0x11, 0x0a10);
4024
4025         tg3_writephy(tp, 0x18, 0x00a0);
4026         tg3_writephy(tp, 0x16, 0x41ff);
4027
4028         /* Assert and deassert POR. */
4029         tg3_writephy(tp, 0x13, 0x0400);
4030         udelay(40);
4031         tg3_writephy(tp, 0x13, 0x0000);
4032
4033         tg3_writephy(tp, 0x11, 0x0a50);
4034         udelay(40);
4035         tg3_writephy(tp, 0x11, 0x0a10);
4036
4037         /* Wait for signal to stabilize */
4038         /* XXX schedule_timeout() ... */
4039         for (i = 0; i < 15000; i++)
4040                 udelay(10);
4041
4042         /* Deselect the channel register so we can read the PHYID
4043          * later.
4044          */
4045         tg3_writephy(tp, 0x10, 0x8011);
4046 }
4047
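/* Fiber link setup using the SerDes hardware autoneg engine
 * (SG_DIG_CTRL), including a parallel-detection fallback for link
 * partners that never send config words.  Returns nonzero when the
 * link is up.
 */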
4048 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4049 {
4050         u16 flowctrl;
4051         u32 sg_dig_ctrl, sg_dig_status;
4052         u32 serdes_cfg, expected_sg_dig_ctrl;
4053         int workaround, port_a;
4054         int current_link_up;
4055
4056         serdes_cfg = 0;
4057         expected_sg_dig_ctrl = 0;
4058         workaround = 0;
4059         port_a = 1;
4060         current_link_up = 0;
4061
4062         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4063             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4064                 workaround = 1;
4065                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4066                         port_a = 0;
4067
4068                 /* Preserve bits 0-11, 13, 14 for signal pre-emphasis
4069                  * and bits 20-23 for the voltage regulator. */
4070                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4071         }
4072
4073         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4074
4075         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4076                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4077                         if (workaround) {
4078                                 u32 val = serdes_cfg;
4079
4080                                 if (port_a)
4081                                         val |= 0xc010000;
4082                                 else
4083                                         val |= 0x4010000;
4084                                 tw32_f(MAC_SERDES_CFG, val);
4085                         }
4086
4087                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4088                 }
4089                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4090                         tg3_setup_flow_control(tp, 0, 0);
4091                         current_link_up = 1;
4092                 }
4093                 goto out;
4094         }
4095
4096         /* Want auto-negotiation.  */
4097         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4098
4099         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4100         if (flowctrl & ADVERTISE_1000XPAUSE)
4101                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4102         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4103                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4104
4105         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4106                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4107                     tp->serdes_counter &&
4108                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4109                                     MAC_STATUS_RCVD_CFG)) ==
4110                      MAC_STATUS_PCS_SYNCED)) {
4111                         tp->serdes_counter--;
4112                         current_link_up = 1;
4113                         goto out;
4114                 }
4115 restart_autoneg:
4116                 if (workaround)
4117                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4118                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4119                 udelay(5);
4120                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4121
4122                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4123                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4124         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4125                                  MAC_STATUS_SIGNAL_DET)) {
4126                 sg_dig_status = tr32(SG_DIG_STATUS);
4127                 mac_status = tr32(MAC_STATUS);
4128
4129                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4130                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4131                         u32 local_adv = 0, remote_adv = 0;
4132
4133                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4134                                 local_adv |= ADVERTISE_1000XPAUSE;
4135                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4136                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4137
4138                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4139                                 remote_adv |= LPA_1000XPAUSE;
4140                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4141                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4142
4143                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4144                         current_link_up = 1;
4145                         tp->serdes_counter = 0;
4146                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4147                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4148                         if (tp->serdes_counter)
4149                                 tp->serdes_counter--;
4150                         else {
4151                                 if (workaround) {
4152                                         u32 val = serdes_cfg;
4153
4154                                         if (port_a)
4155                                                 val |= 0xc010000;
4156                                         else
4157                                                 val |= 0x4010000;
4158
4159                                         tw32_f(MAC_SERDES_CFG, val);
4160                                 }
4161
4162                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4163                                 udelay(40);
4164
4165                                 /* Link parallel detection - link is up
4166                                  * only if we have PCS_SYNC and are not
4167                                  * receiving config code words. */
4168                                 mac_status = tr32(MAC_STATUS);
4169                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4170                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4171                                         tg3_setup_flow_control(tp, 0, 0);
4172                                         current_link_up = 1;
4173                                         tp->phy_flags |=
4174                                                 TG3_PHYFLG_PARALLEL_DETECT;
4175                                         tp->serdes_counter =
4176                                                 SERDES_PARALLEL_DET_TIMEOUT;
4177                                 } else
4178                                         goto restart_autoneg;
4179                         }
4180                 }
4181         } else {
4182                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4183                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4184         }
4185
4186 out:
4187         return current_link_up;
4188 }
4189
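/* Fiber link setup without the hardware engine: run the software
 * autoneg state machine, or simply force a 1000FD link when autoneg
 * is disabled.  Returns nonzero when the link is up.
 */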
4190 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4191 {
4192         int current_link_up = 0;
4193
4194         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4195                 goto out;
4196
4197         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4198                 u32 txflags, rxflags;
4199                 int i;
4200
4201                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4202                         u32 local_adv = 0, remote_adv = 0;
4203
4204                         if (txflags & ANEG_CFG_PS1)
4205                                 local_adv |= ADVERTISE_1000XPAUSE;
4206                         if (txflags & ANEG_CFG_PS2)
4207                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4208
4209                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4210                                 remote_adv |= LPA_1000XPAUSE;
4211                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4212                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4213
4214                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4215
4216                         current_link_up = 1;
4217                 }
4218                 for (i = 0; i < 30; i++) {
4219                         udelay(20);
4220                         tw32_f(MAC_STATUS,
4221                                (MAC_STATUS_SYNC_CHANGED |
4222                                 MAC_STATUS_CFG_CHANGED));
4223                         udelay(40);
4224                         if ((tr32(MAC_STATUS) &
4225                              (MAC_STATUS_SYNC_CHANGED |
4226                               MAC_STATUS_CFG_CHANGED)) == 0)
4227                                 break;
4228                 }
4229
4230                 mac_status = tr32(MAC_STATUS);
4231                 if (current_link_up == 0 &&
4232                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4233                     !(mac_status & MAC_STATUS_RCVD_CFG))
4234                         current_link_up = 1;
4235         } else {
4236                 tg3_setup_flow_control(tp, 0, 0);
4237
4238                 /* Forcing 1000FD link up. */
4239                 current_link_up = 1;
4240
4241                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4242                 udelay(40);
4243
4244                 tw32_f(MAC_MODE, tp->mac_mode);
4245                 udelay(40);
4246         }
4247
4248 out:
4249         return current_link_up;
4250 }
4251
4252 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4253 {
4254         u32 orig_pause_cfg;
4255         u16 orig_active_speed;
4256         u8 orig_active_duplex;
4257         u32 mac_status;
4258         int current_link_up;
4259         int i;
4260
4261         orig_pause_cfg = tp->link_config.active_flowctrl;
4262         orig_active_speed = tp->link_config.active_speed;
4263         orig_active_duplex = tp->link_config.active_duplex;
4264
4265         if (!tg3_flag(tp, HW_AUTONEG) &&
4266             netif_carrier_ok(tp->dev) &&
4267             tg3_flag(tp, INIT_COMPLETE)) {
4268                 mac_status = tr32(MAC_STATUS);
4269                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4270                                MAC_STATUS_SIGNAL_DET |
4271                                MAC_STATUS_CFG_CHANGED |
4272                                MAC_STATUS_RCVD_CFG);
4273                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4274                                    MAC_STATUS_SIGNAL_DET)) {
4275                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4276                                             MAC_STATUS_CFG_CHANGED));
4277                         return 0;
4278                 }
4279         }
4280
4281         tw32_f(MAC_TX_AUTO_NEG, 0);
4282
4283         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4284         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4285         tw32_f(MAC_MODE, tp->mac_mode);
4286         udelay(40);
4287
4288         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4289                 tg3_init_bcm8002(tp);
4290
4291         /* Enable link change event even when serdes polling.  */
4292         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4293         udelay(40);
4294
4295         current_link_up = 0;
4296         mac_status = tr32(MAC_STATUS);
4297
4298         if (tg3_flag(tp, HW_AUTONEG))
4299                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4300         else
4301                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4302
4303         tp->napi[0].hw_status->status =
4304                 (SD_STATUS_UPDATED |
4305                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4306
4307         for (i = 0; i < 100; i++) {
4308                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4309                                     MAC_STATUS_CFG_CHANGED));
4310                 udelay(5);
4311                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4312                                          MAC_STATUS_CFG_CHANGED |
4313                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4314                         break;
4315         }
4316
4317         mac_status = tr32(MAC_STATUS);
4318         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4319                 current_link_up = 0;
4320                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4321                     tp->serdes_counter == 0) {
4322                         tw32_f(MAC_MODE, (tp->mac_mode |
4323                                           MAC_MODE_SEND_CONFIGS));
4324                         udelay(1);
4325                         tw32_f(MAC_MODE, tp->mac_mode);
4326                 }
4327         }
4328
4329         if (current_link_up == 1) {
4330                 tp->link_config.active_speed = SPEED_1000;
4331                 tp->link_config.active_duplex = DUPLEX_FULL;
4332                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4333                                     LED_CTRL_LNKLED_OVERRIDE |
4334                                     LED_CTRL_1000MBPS_ON));
4335         } else {
4336                 tp->link_config.active_speed = SPEED_INVALID;
4337                 tp->link_config.active_duplex = DUPLEX_INVALID;
4338                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4339                                     LED_CTRL_LNKLED_OVERRIDE |
4340                                     LED_CTRL_TRAFFIC_OVERRIDE));
4341         }
4342
4343         if (current_link_up != netif_carrier_ok(tp->dev)) {
4344                 if (current_link_up)
4345                         netif_carrier_on(tp->dev);
4346                 else
4347                         netif_carrier_off(tp->dev);
4348                 tg3_link_report(tp);
4349         } else {
4350                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4351                 if (orig_pause_cfg != now_pause_cfg ||
4352                     orig_active_speed != tp->link_config.active_speed ||
4353                     orig_active_duplex != tp->link_config.active_duplex)
4354                         tg3_link_report(tp);
4355         }
4356
4357         return 0;
4358 }
4359
4360 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4361 {
4362         int current_link_up, err = 0;
4363         u32 bmsr, bmcr;
4364         u16 current_speed;
4365         u8 current_duplex;
4366         u32 local_adv, remote_adv;
4367
4368         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4369         tw32_f(MAC_MODE, tp->mac_mode);
4370         udelay(40);
4371
4372         tw32(MAC_EVENT, 0);
4373
4374         tw32_f(MAC_STATUS,
4375              (MAC_STATUS_SYNC_CHANGED |
4376               MAC_STATUS_CFG_CHANGED |
4377               MAC_STATUS_MI_COMPLETION |
4378               MAC_STATUS_LNKSTATE_CHANGED));
4379         udelay(40);
4380
4381         if (force_reset)
4382                 tg3_phy_reset(tp);
4383
4384         current_link_up = 0;
4385         current_speed = SPEED_INVALID;
4386         current_duplex = DUPLEX_INVALID;
4387
4388         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4389         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4390         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4391                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4392                         bmsr |= BMSR_LSTATUS;
4393                 else
4394                         bmsr &= ~BMSR_LSTATUS;
4395         }
4396
4397         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4398
4399         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4400             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4401                 /* do nothing, just check for link up at the end */
4402         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4403                 u32 adv, new_adv;
4404
4405                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4406                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4407                                   ADVERTISE_1000XPAUSE |
4408                                   ADVERTISE_1000XPSE_ASYM |
4409                                   ADVERTISE_SLCT);
4410
4411                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4412
4413                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4414                         new_adv |= ADVERTISE_1000XHALF;
4415                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4416                         new_adv |= ADVERTISE_1000XFULL;
4417
4418                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4419                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4420                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4421                         tg3_writephy(tp, MII_BMCR, bmcr);
4422
4423                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4424                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4425                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4426
4427                         return err;
4428                 }
4429         } else {
4430                 u32 new_bmcr;
4431
4432                 bmcr &= ~BMCR_SPEED1000;
4433                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4434
4435                 if (tp->link_config.duplex == DUPLEX_FULL)
4436                         new_bmcr |= BMCR_FULLDPLX;
4437
4438                 if (new_bmcr != bmcr) {
4439                         /* BMCR_SPEED1000 is a reserved bit that needs
4440                          * to be set on write.
4441                          */
4442                         new_bmcr |= BMCR_SPEED1000;
4443
4444                         /* Force a linkdown */
4445                         if (netif_carrier_ok(tp->dev)) {
4446                                 u32 adv;
4447
4448                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4449                                 adv &= ~(ADVERTISE_1000XFULL |
4450                                          ADVERTISE_1000XHALF |
4451                                          ADVERTISE_SLCT);
4452                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4453                                 tg3_writephy(tp, MII_BMCR, bmcr |
4454                                                            BMCR_ANRESTART |
4455                                                            BMCR_ANENABLE);
4456                                 udelay(10);
4457                                 netif_carrier_off(tp->dev);
4458                         }
4459                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4460                         bmcr = new_bmcr;
4461                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4462                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4463                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4464                             ASIC_REV_5714) {
4465                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4466                                         bmsr |= BMSR_LSTATUS;
4467                                 else
4468                                         bmsr &= ~BMSR_LSTATUS;
4469                         }
4470                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4471                 }
4472         }
4473
4474         if (bmsr & BMSR_LSTATUS) {
4475                 current_speed = SPEED_1000;
4476                 current_link_up = 1;
4477                 if (bmcr & BMCR_FULLDPLX)
4478                         current_duplex = DUPLEX_FULL;
4479                 else
4480                         current_duplex = DUPLEX_HALF;
4481
4482                 local_adv = 0;
4483                 remote_adv = 0;
4484
4485                 if (bmcr & BMCR_ANENABLE) {
4486                         u32 common;
4487
4488                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4489                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4490                         common = local_adv & remote_adv;
4491                         if (common & (ADVERTISE_1000XHALF |
4492                                       ADVERTISE_1000XFULL)) {
4493                                 if (common & ADVERTISE_1000XFULL)
4494                                         current_duplex = DUPLEX_FULL;
4495                                 else
4496                                         current_duplex = DUPLEX_HALF;
4497                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4498                                 /* Link is up via parallel detect */
4499                         } else {
4500                                 current_link_up = 0;
4501                         }
4502                 }
4503         }
4504
4505         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4506                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4507
4508         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4509         if (tp->link_config.active_duplex == DUPLEX_HALF)
4510                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4511
4512         tw32_f(MAC_MODE, tp->mac_mode);
4513         udelay(40);
4514
4515         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4516
4517         tp->link_config.active_speed = current_speed;
4518         tp->link_config.active_duplex = current_duplex;
4519
4520         if (current_link_up != netif_carrier_ok(tp->dev)) {
4521                 if (current_link_up)
4522                         netif_carrier_on(tp->dev);
4523                 else {
4524                         netif_carrier_off(tp->dev);
4525                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4526                 }
4527                 tg3_link_report(tp);
4528         }
4529         return err;
4530 }
4531
4532 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4533 {
4534         if (tp->serdes_counter) {
4535                 /* Give autoneg time to complete. */
4536                 tp->serdes_counter--;
4537                 return;
4538         }
4539
4540         if (!netif_carrier_ok(tp->dev) &&
4541             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4542                 u32 bmcr;
4543
4544                 tg3_readphy(tp, MII_BMCR, &bmcr);
4545                 if (bmcr & BMCR_ANENABLE) {
4546                         u32 phy1, phy2;
4547
4548                         /* Select shadow register 0x1f */
4549                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4550                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4551
4552                         /* Select expansion interrupt status register */
4553                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4554                                          MII_TG3_DSP_EXP1_INT_STAT);
4555                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4556                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4557
4558                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4559                                 /* We have signal detect and not receiving
4560                                  * config code words, link is up by parallel
4561                                  * detection.
4562                                  */
4563
4564                                 bmcr &= ~BMCR_ANENABLE;
4565                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4566                                 tg3_writephy(tp, MII_BMCR, bmcr);
4567                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4568                         }
4569                 }
4570         } else if (netif_carrier_ok(tp->dev) &&
4571                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4572                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4573                 u32 phy2;
4574
4575                 /* Select expansion interrupt status register */
4576                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4577                                  MII_TG3_DSP_EXP1_INT_STAT);
4578                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4579                 if (phy2 & 0x20) {
4580                         u32 bmcr;
4581
4582                         /* Config code words received, turn on autoneg. */
4583                         tg3_readphy(tp, MII_BMCR, &bmcr);
4584                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4585
4586                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4587
4588                 }
4589         }
4590 }
4591
4592 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4593 {
4594         u32 val;
4595         int err;
4596
4597         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4598                 err = tg3_setup_fiber_phy(tp, force_reset);
4599         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4600                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4601         else
4602                 err = tg3_setup_copper_phy(tp, force_reset);
4603
4604         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4605                 u32 scale;
4606
4607                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4608                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4609                         scale = 65;
4610                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4611                         scale = 6;
4612                 else
4613                         scale = 12;
4614
4615                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4616                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4617                 tw32(GRC_MISC_CFG, val);
4618         }
4619
4620         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4621               (6 << TX_LENGTHS_IPG_SHIFT);
4622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4623                 val |= tr32(MAC_TX_LENGTHS) &
4624                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4625                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4626
4627         if (tp->link_config.active_speed == SPEED_1000 &&
4628             tp->link_config.active_duplex == DUPLEX_HALF)
4629                 tw32(MAC_TX_LENGTHS, val |
4630                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4631         else
4632                 tw32(MAC_TX_LENGTHS, val |
4633                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4634
4635         if (!tg3_flag(tp, 5705_PLUS)) {
4636                 if (netif_carrier_ok(tp->dev)) {
4637                         tw32(HOSTCC_STAT_COAL_TICKS,
4638                              tp->coal.stats_block_coalesce_usecs);
4639                 } else {
4640                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4641                 }
4642         }
4643
4644         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4645                 val = tr32(PCIE_PWR_MGMT_THRESH);
4646                 if (!netif_carrier_ok(tp->dev))
4647                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4648                               tp->pwrmgmt_thresh;
4649                 else
4650                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4651                 tw32(PCIE_PWR_MGMT_THRESH, val);
4652         }
4653
4654         return err;
4655 }
4656
4657 static inline int tg3_irq_sync(struct tg3 *tp)
4658 {
4659         return tp->irq_sync;
4660 }
4661
4662 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4663 {
4664         int i;
4665
4666         dst = (u32 *)((u8 *)dst + off);
4667         for (i = 0; i < len; i += sizeof(u32))
4668                 *dst++ = tr32(off + i);
4669 }
4670
4671 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4672 {
4673         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4674         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4675         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4676         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4677         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4678         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4679         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4680         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4681         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4682         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4683         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4684         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4685         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4686         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4687         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4688         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4689         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4690         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4691         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4692
4693         if (tg3_flag(tp, SUPPORT_MSIX))
4694                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4695
4696         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4697         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4698         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4699         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4700         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4701         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4702         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4703         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4704
4705         if (!tg3_flag(tp, 5705_PLUS)) {
4706                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4707                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4708                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4709         }
4710
4711         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4712         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4713         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4714         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4715         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4716
4717         if (tg3_flag(tp, NVRAM))
4718                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4719 }
4720
4721 static void tg3_dump_state(struct tg3 *tp)
4722 {
4723         int i;
4724         u32 *regs;
4725
4726         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4727         if (!regs) {
4728                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4729                 return;
4730         }
4731
4732         if (tg3_flag(tp, PCI_EXPRESS)) {
4733                 /* Read up to but not including private PCI registers */
4734                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4735                         regs[i / sizeof(u32)] = tr32(i);
4736         } else
4737                 tg3_dump_legacy_regs(tp, regs);
4738
4739         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4740                 if (!regs[i + 0] && !regs[i + 1] &&
4741                     !regs[i + 2] && !regs[i + 3])
4742                         continue;
4743
4744                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4745                            i * 4,
4746                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4747         }
4748
4749         kfree(regs);
4750
4751         for (i = 0; i < tp->irq_cnt; i++) {
4752                 struct tg3_napi *tnapi = &tp->napi[i];
4753
4754                 /* SW status block */
4755                 netdev_err(tp->dev,
4756                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4757                            i,
4758                            tnapi->hw_status->status,
4759                            tnapi->hw_status->status_tag,
4760                            tnapi->hw_status->rx_jumbo_consumer,
4761                            tnapi->hw_status->rx_consumer,
4762                            tnapi->hw_status->rx_mini_consumer,
4763                            tnapi->hw_status->idx[0].rx_producer,
4764                            tnapi->hw_status->idx[0].tx_consumer);
4765
4766                 netdev_err(tp->dev,
4767                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4768                            i,
4769                            tnapi->last_tag, tnapi->last_irq_tag,
4770                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4771                            tnapi->rx_rcb_ptr,
4772                            tnapi->prodring.rx_std_prod_idx,
4773                            tnapi->prodring.rx_std_cons_idx,
4774                            tnapi->prodring.rx_jmb_prod_idx,
4775                            tnapi->prodring.rx_jmb_cons_idx);
4776         }
4777 }
4778
4779 /* This is called whenever we suspect that the system chipset is re-
4780  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4781  * is bogus tx completions. We try to recover by setting the
4782  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4783  * in the workqueue.
4784  */
4785 static void tg3_tx_recover(struct tg3 *tp)
4786 {
4787         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4788                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4789
4790         netdev_warn(tp->dev,
4791                     "The system may be re-ordering memory-mapped I/O "
4792                     "cycles to the network device, attempting to recover. "
4793                     "Please report the problem to the driver maintainer "
4794                     "and include system chipset information.\n");
4795
4796         spin_lock(&tp->lock);
4797         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4798         spin_unlock(&tp->lock);
4799 }
4800
4801 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4802 {
4803         /* Tell compiler to fetch tx indices from memory. */
4804         barrier();
4805         return tnapi->tx_pending -
4806                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4807 }
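
/* Worked example of the ring arithmetic above (values are purely
 * illustrative): with TG3_TX_RING_SIZE == 512, tx_pending == 511,
 * tx_prod == 5 and tx_cons == 510, the in-flight descriptor count is
 * (5 - 510) & 511 == 7, so tg3_tx_avail() returns 511 - 7 == 504.
 * The mask makes the unsigned subtraction wrap correctly when the
 * producer index has passed the end of the ring.
 */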
4808
4809 /* Tigon3 never reports partial packet sends.  So we do not
4810  * need special logic to handle SKBs that have not had all
4811  * of their frags sent yet, like SunGEM does.
4812  */
4813 static void tg3_tx(struct tg3_napi *tnapi)
4814 {
4815         struct tg3 *tp = tnapi->tp;
4816         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4817         u32 sw_idx = tnapi->tx_cons;
4818         struct netdev_queue *txq;
4819         int index = tnapi - tp->napi;
4820
4821         if (tg3_flag(tp, ENABLE_TSS))
4822                 index--;
4823
4824         txq = netdev_get_tx_queue(tp->dev, index);
4825
4826         while (sw_idx != hw_idx) {
4827                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4828                 struct sk_buff *skb = ri->skb;
4829                 int i, tx_bug = 0;
4830
4831                 if (unlikely(skb == NULL)) {
4832                         tg3_tx_recover(tp);
4833                         return;
4834                 }
4835
4836                 pci_unmap_single(tp->pdev,
4837                                  dma_unmap_addr(ri, mapping),
4838                                  skb_headlen(skb),
4839                                  PCI_DMA_TODEVICE);
4840
4841                 ri->skb = NULL;
4842
4843                 sw_idx = NEXT_TX(sw_idx);
4844
4845                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4846                         ri = &tnapi->tx_buffers[sw_idx];
4847                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4848                                 tx_bug = 1;
4849
4850                         pci_unmap_page(tp->pdev,
4851                                        dma_unmap_addr(ri, mapping),
4852                                        skb_shinfo(skb)->frags[i].size,
4853                                        PCI_DMA_TODEVICE);
4854                         sw_idx = NEXT_TX(sw_idx);
4855                 }
4856
4857                 dev_kfree_skb(skb);
4858
4859                 if (unlikely(tx_bug)) {
4860                         tg3_tx_recover(tp);
4861                         return;
4862                 }
4863         }
4864
4865         tnapi->tx_cons = sw_idx;
4866
4867         /* Need to make the tx_cons update visible to tg3_start_xmit()
4868          * before checking for netif_queue_stopped().  Without the
4869          * memory barrier, there is a small possibility that tg3_start_xmit()
4870          * will miss it and cause the queue to be stopped forever.
4871          */
4872         smp_mb();
4873
4874         if (unlikely(netif_tx_queue_stopped(txq) &&
4875                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4876                 __netif_tx_lock(txq, smp_processor_id());
4877                 if (netif_tx_queue_stopped(txq) &&
4878                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4879                         netif_tx_wake_queue(txq);
4880                 __netif_tx_unlock(txq);
4881         }
4882 }
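
/* Illustrative sketch (assumed shape, not called anywhere): the
 * producer-side half of the stop/wake handshake that the smp_mb()
 * in tg3_tx() above pairs with.  The real logic lives in
 * tg3_start_xmit(); this only shows the stop-then-recheck pattern.
 */
static inline void tg3_tx_maybe_stop_sketch(struct tg3_napi *tnapi,
					    struct netdev_queue *txq)
{
	if (unlikely(tg3_tx_avail(tnapi) <= MAX_SKB_FRAGS + 1) &&
	    !netif_tx_queue_stopped(txq)) {
		netif_tx_stop_queue(txq);

		/* The queue must be re-checked after stopping it:
		 * tg3_tx() may have freed descriptors in the meantime,
		 * and its wake-up test could have run before the stop
		 * above became visible.
		 */
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}
}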
4883
4884 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4885 {
4886         if (!ri->skb)
4887                 return;
4888
4889         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4890                          map_sz, PCI_DMA_FROMDEVICE);
4891         dev_kfree_skb_any(ri->skb);
4892         ri->skb = NULL;
4893 }
4894
4895 /* Returns size of skb allocated or < 0 on error.
4896  *
4897  * We only need to fill in the address because the other members
4898  * of the RX descriptor are invariant, see tg3_init_rings.
4899  *
4900  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4901  * posting buffers we only dirty the first cache line of the RX
4902  * descriptor (containing the address).  Whereas for the RX status
4903  * buffers the cpu only reads the last cacheline of the RX descriptor
4904  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4905  */
4906 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4907                             u32 opaque_key, u32 dest_idx_unmasked)
4908 {
4909         struct tg3_rx_buffer_desc *desc;
4910         struct ring_info *map;
4911         struct sk_buff *skb;
4912         dma_addr_t mapping;
4913         int skb_size, dest_idx;
4914
4915         switch (opaque_key) {
4916         case RXD_OPAQUE_RING_STD:
4917                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4918                 desc = &tpr->rx_std[dest_idx];
4919                 map = &tpr->rx_std_buffers[dest_idx];
4920                 skb_size = tp->rx_pkt_map_sz;
4921                 break;
4922
4923         case RXD_OPAQUE_RING_JUMBO:
4924                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4925                 desc = &tpr->rx_jmb[dest_idx].std;
4926                 map = &tpr->rx_jmb_buffers[dest_idx];
4927                 skb_size = TG3_RX_JMB_MAP_SZ;
4928                 break;
4929
4930         default:
4931                 return -EINVAL;
4932         }
4933
4934         /* Do not overwrite any of the map or ring information
4935          * until we are sure we can commit to a new buffer.
4936          *
4937          * Callers depend upon this behavior and assume that
4938          * we leave everything unchanged if we fail.
4939          */
4940         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4941         if (skb == NULL)
4942                 return -ENOMEM;
4943
4944         skb_reserve(skb, tp->rx_offset);
4945
4946         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4947                                  PCI_DMA_FROMDEVICE);
4948         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4949                 dev_kfree_skb(skb);
4950                 return -EIO;
4951         }
4952
4953         map->skb = skb;
4954         dma_unmap_addr_set(map, mapping, mapping);
4955
4956         desc->addr_hi = ((u64)mapping >> 32);
4957         desc->addr_lo = ((u64)mapping & 0xffffffff);
4958
4959         return skb_size;
4960 }
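
/* Example of the address split above (mapping value hypothetical):
 * a 64-bit DMA address of 0x0000000123456780 is posted to the chip
 * as addr_hi = 0x00000001 and addr_lo = 0x23456780.  On 32-bit-only
 * configurations the same descriptor layout still works, with
 * addr_hi simply being 0.
 */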
4961
4962 /* We only need to move over the address because the other
4963  * members of the RX descriptor are invariant.  See notes above
4964  * tg3_alloc_rx_skb for full details.
4965  */
4966 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4967                            struct tg3_rx_prodring_set *dpr,
4968                            u32 opaque_key, int src_idx,
4969                            u32 dest_idx_unmasked)
4970 {
4971         struct tg3 *tp = tnapi->tp;
4972         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4973         struct ring_info *src_map, *dest_map;
4974         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4975         int dest_idx;
4976
4977         switch (opaque_key) {
4978         case RXD_OPAQUE_RING_STD:
4979                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4980                 dest_desc = &dpr->rx_std[dest_idx];
4981                 dest_map = &dpr->rx_std_buffers[dest_idx];
4982                 src_desc = &spr->rx_std[src_idx];
4983                 src_map = &spr->rx_std_buffers[src_idx];
4984                 break;
4985
4986         case RXD_OPAQUE_RING_JUMBO:
4987                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4988                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4989                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4990                 src_desc = &spr->rx_jmb[src_idx].std;
4991                 src_map = &spr->rx_jmb_buffers[src_idx];
4992                 break;
4993
4994         default:
4995                 return;
4996         }
4997
4998         dest_map->skb = src_map->skb;
4999         dma_unmap_addr_set(dest_map, mapping,
5000                            dma_unmap_addr(src_map, mapping));
5001         dest_desc->addr_hi = src_desc->addr_hi;
5002         dest_desc->addr_lo = src_desc->addr_lo;
5003
5004         /* Ensure that the update to the skb happens after the physical
5005          * addresses have been transferred to the new BD location.
5006          */
5007         smp_wmb();
5008
5009         src_map->skb = NULL;
5010 }
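
/* Schematic view of the publication protocol above (illustration
 * only): the writer transfers the descriptor contents, issues
 * smp_wmb(), and only then clears src_map->skb; a reader such as
 * tg3_rx_prodring_xfer() tests the skb pointer, issues smp_rmb(),
 * and only then trusts the descriptor it shadows.  The paired
 * barriers ensure a reader can never observe the cleared skb pointer
 * while the descriptor transfer is still in flight.
 */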
5011
5012 /* The RX ring scheme is composed of multiple rings which post fresh
5013  * buffers to the chip, and one special ring the chip uses to report
5014  * status back to the host.
5015  *
5016  * The special ring reports the status of received packets to the
5017  * host.  The chip does not write into the original descriptor the
5018  * RX buffer was obtained from.  The chip simply takes the original
5019  * descriptor as provided by the host, updates the status and length
5020  * field, then writes this into the next status ring entry.
5021  *
5022  * Each ring the host uses to post buffers to the chip is described
5023  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5024  * it is first placed into on-chip RAM.  When the packet's length
5025  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
5026  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5027  * whose MAXLEN covers the new packet's length is chosen.
5028  *
5029  * The "separate ring for rx status" scheme may sound queer, but it makes
5030  * sense from a cache coherency perspective.  If only the host writes
5031  * to the buffer post rings, and only the chip writes to the rx status
5032  * rings, then cache lines never move beyond shared-modified state.
5033  * If both the host and chip were to write into the same ring, cache line
5034  * eviction could occur since both entities want it in an exclusive state.
5035  */
5036 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5037 {
5038         struct tg3 *tp = tnapi->tp;
5039         u32 work_mask, rx_std_posted = 0;
5040         u32 std_prod_idx, jmb_prod_idx;
5041         u32 sw_idx = tnapi->rx_rcb_ptr;
5042         u16 hw_idx;
5043         int received;
5044         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5045
5046         hw_idx = *(tnapi->rx_rcb_prod_idx);
5047         /*
5048          * We need to order the read of hw_idx and the read of
5049          * the opaque cookie.
5050          */
5051         rmb();
5052         work_mask = 0;
5053         received = 0;
5054         std_prod_idx = tpr->rx_std_prod_idx;
5055         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5056         while (sw_idx != hw_idx && budget > 0) {
5057                 struct ring_info *ri;
5058                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5059                 unsigned int len;
5060                 struct sk_buff *skb;
5061                 dma_addr_t dma_addr;
5062                 u32 opaque_key, desc_idx, *post_ptr;
5063
5064                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5065                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5066                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5067                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5068                         dma_addr = dma_unmap_addr(ri, mapping);
5069                         skb = ri->skb;
5070                         post_ptr = &std_prod_idx;
5071                         rx_std_posted++;
5072                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5073                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5074                         dma_addr = dma_unmap_addr(ri, mapping);
5075                         skb = ri->skb;
5076                         post_ptr = &jmb_prod_idx;
5077                 } else
5078                         goto next_pkt_nopost;
5079
5080                 work_mask |= opaque_key;
5081
5082                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5083                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5084                 drop_it:
5085                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5086                                        desc_idx, *post_ptr);
5087                 drop_it_no_recycle:
5088                         /* Other statistics are maintained by the card. */
5089                         tp->rx_dropped++;
5090                         goto next_pkt;
5091                 }
5092
5093                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5094                       ETH_FCS_LEN;
5095
5096                 if (len > TG3_RX_COPY_THRESH(tp)) {
5097                         int skb_size;
5098
5099                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5100                                                     *post_ptr);
5101                         if (skb_size < 0)
5102                                 goto drop_it;
5103
5104                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5105                                          PCI_DMA_FROMDEVICE);
5106
5107                         /* Ensure that the update to the skb happens
5108                          * after the usage of the old DMA mapping.
5109                          */
5110                         smp_wmb();
5111
5112                         ri->skb = NULL;
5113
5114                         skb_put(skb, len);
5115                 } else {
5116                         struct sk_buff *copy_skb;
5117
5118                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5119                                        desc_idx, *post_ptr);
5120
5121                         copy_skb = netdev_alloc_skb(tp->dev, len +
5122                                                     TG3_RAW_IP_ALIGN);
5123                         if (copy_skb == NULL)
5124                                 goto drop_it_no_recycle;
5125
5126                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5127                         skb_put(copy_skb, len);
5128                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5129                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5130                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5131
5132                         /* We'll reuse the original ring buffer. */
5133                         skb = copy_skb;
5134                 }
5135
5136                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5137                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5138                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5139                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5140                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5141                 else
5142                         skb_checksum_none_assert(skb);
5143
5144                 skb->protocol = eth_type_trans(skb, tp->dev);
5145
5146                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5147                     skb->protocol != htons(ETH_P_8021Q)) {
5148                         dev_kfree_skb(skb);
5149                         goto drop_it_no_recycle;
5150                 }
5151
5152                 if (desc->type_flags & RXD_FLAG_VLAN &&
5153                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5154                         __vlan_hwaccel_put_tag(skb,
5155                                                desc->err_vlan & RXD_VLAN_MASK);
5156
5157                 napi_gro_receive(&tnapi->napi, skb);
5158
5159                 received++;
5160                 budget--;
5161
5162 next_pkt:
5163                 (*post_ptr)++;
5164
5165                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5166                         tpr->rx_std_prod_idx = std_prod_idx &
5167                                                tp->rx_std_ring_mask;
5168                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5169                                      tpr->rx_std_prod_idx);
5170                         work_mask &= ~RXD_OPAQUE_RING_STD;
5171                         rx_std_posted = 0;
5172                 }
5173 next_pkt_nopost:
5174                 sw_idx++;
5175                 sw_idx &= tp->rx_ret_ring_mask;
5176
5177                 /* Refresh hw_idx to see if there is new work */
5178                 if (sw_idx == hw_idx) {
5179                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5180                         rmb();
5181                 }
5182         }
5183
5184         /* ACK the status ring. */
5185         tnapi->rx_rcb_ptr = sw_idx;
5186         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5187
5188         /* Refill RX ring(s). */
5189         if (!tg3_flag(tp, ENABLE_RSS)) {
5190                 if (work_mask & RXD_OPAQUE_RING_STD) {
5191                         tpr->rx_std_prod_idx = std_prod_idx &
5192                                                tp->rx_std_ring_mask;
5193                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5194                                      tpr->rx_std_prod_idx);
5195                 }
5196                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5197                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5198                                                tp->rx_jmb_ring_mask;
5199                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5200                                      tpr->rx_jmb_prod_idx);
5201                 }
5202                 mmiowb();
5203         } else if (work_mask) {
5204                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5205                  * updated before the producer indices can be updated.
5206                  */
5207                 smp_wmb();
5208
5209                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5210                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5211
5212                 if (tnapi != &tp->napi[1])
5213                         napi_schedule(&tp->napi[1].napi);
5214         }
5215
5216         return received;
5217 }
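
/* Condensed shape of the status-ring handshake implemented above,
 * for reference (error handling and ring refill omitted; process()
 * is a placeholder, not a real function):
 *
 *	hw_idx = *(tnapi->rx_rcb_prod_idx);	// chip's producer index
 *	rmb();					// order vs. ring contents
 *	while (sw_idx != hw_idx) {
 *		process(&tnapi->rx_rcb[sw_idx]);
 *		sw_idx = (sw_idx + 1) & tp->rx_ret_ring_mask;
 *	}
 *	tnapi->rx_rcb_ptr = sw_idx;
 *	tw32_rx_mbox(tnapi->consmbox, sw_idx);	// ACK back to the chip
 */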
5218
5219 static void tg3_poll_link(struct tg3 *tp)
5220 {
5221         /* handle link change and other phy events */
5222         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5223                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5224
5225                 if (sblk->status & SD_STATUS_LINK_CHG) {
5226                         sblk->status = SD_STATUS_UPDATED |
5227                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5228                         spin_lock(&tp->lock);
5229                         if (tg3_flag(tp, USE_PHYLIB)) {
5230                                 tw32_f(MAC_STATUS,
5231                                      (MAC_STATUS_SYNC_CHANGED |
5232                                       MAC_STATUS_CFG_CHANGED |
5233                                       MAC_STATUS_MI_COMPLETION |
5234                                       MAC_STATUS_LNKSTATE_CHANGED));
5235                                 udelay(40);
5236                         } else
5237                                 tg3_setup_phy(tp, 0);
5238                         spin_unlock(&tp->lock);
5239                 }
5240         }
5241 }
5242
5243 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5244                                 struct tg3_rx_prodring_set *dpr,
5245                                 struct tg3_rx_prodring_set *spr)
5246 {
5247         u32 si, di, cpycnt, src_prod_idx;
5248         int i, err = 0;
5249
5250         while (1) {
5251                 src_prod_idx = spr->rx_std_prod_idx;
5252
5253                 /* Make sure updates to the rx_std_buffers[] entries and the
5254                  * standard producer index are seen in the correct order.
5255                  */
5256                 smp_rmb();
5257
5258                 if (spr->rx_std_cons_idx == src_prod_idx)
5259                         break;
5260
5261                 if (spr->rx_std_cons_idx < src_prod_idx)
5262                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5263                 else
5264                         cpycnt = tp->rx_std_ring_mask + 1 -
5265                                  spr->rx_std_cons_idx;
5266
5267                 cpycnt = min(cpycnt,
5268                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5269
5270                 si = spr->rx_std_cons_idx;
5271                 di = dpr->rx_std_prod_idx;
5272
5273                 for (i = di; i < di + cpycnt; i++) {
5274                         if (dpr->rx_std_buffers[i].skb) {
5275                                 cpycnt = i - di;
5276                                 err = -ENOSPC;
5277                                 break;
5278                         }
5279                 }
5280
5281                 if (!cpycnt)
5282                         break;
5283
5284                 /* Ensure that updates to the rx_std_buffers ring and the
5285                  * shadowed hardware producer ring from tg3_recycle_rx() are
5286                  * ordered correctly WRT the skb check above.
5287                  */
5288                 smp_rmb();
5289
5290                 memcpy(&dpr->rx_std_buffers[di],
5291                        &spr->rx_std_buffers[si],
5292                        cpycnt * sizeof(struct ring_info));
5293
5294                 for (i = 0; i < cpycnt; i++, di++, si++) {
5295                         struct tg3_rx_buffer_desc *sbd, *dbd;
5296                         sbd = &spr->rx_std[si];
5297                         dbd = &dpr->rx_std[di];
5298                         dbd->addr_hi = sbd->addr_hi;
5299                         dbd->addr_lo = sbd->addr_lo;
5300                 }
5301
5302                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5303                                        tp->rx_std_ring_mask;
5304                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5305                                        tp->rx_std_ring_mask;
5306         }
5307
5308         while (1) {
5309                 src_prod_idx = spr->rx_jmb_prod_idx;
5310
5311                 /* Make sure updates to the rx_jmb_buffers[] entries and
5312                  * the jumbo producer index are seen in the correct order.
5313                  */
5314                 smp_rmb();
5315
5316                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5317                         break;
5318
5319                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5320                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5321                 else
5322                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5323                                  spr->rx_jmb_cons_idx;
5324
5325                 cpycnt = min(cpycnt,
5326                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5327
5328                 si = spr->rx_jmb_cons_idx;
5329                 di = dpr->rx_jmb_prod_idx;
5330
5331                 for (i = di; i < di + cpycnt; i++) {
5332                         if (dpr->rx_jmb_buffers[i].skb) {
5333                                 cpycnt = i - di;
5334                                 err = -ENOSPC;
5335                                 break;
5336                         }
5337                 }
5338
5339                 if (!cpycnt)
5340                         break;
5341
5342                 /* Ensure that updates to the rx_jmb_buffers ring and the
5343                  * shadowed hardware producer ring from tg3_recycle_rx() are
5344                  * ordered correctly WRT the skb check above.
5345                  */
5346                 smp_rmb();
5347
5348                 memcpy(&dpr->rx_jmb_buffers[di],
5349                        &spr->rx_jmb_buffers[si],
5350                        cpycnt * sizeof(struct ring_info));
5351
5352                 for (i = 0; i < cpycnt; i++, di++, si++) {
5353                         struct tg3_rx_buffer_desc *sbd, *dbd;
5354                         sbd = &spr->rx_jmb[si].std;
5355                         dbd = &dpr->rx_jmb[di].std;
5356                         dbd->addr_hi = sbd->addr_hi;
5357                         dbd->addr_lo = sbd->addr_lo;
5358                 }
5359
5360                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5361                                        tp->rx_jmb_ring_mask;
5362                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5363                                        tp->rx_jmb_ring_mask;
5364         }
5365
5366         return err;
5367 }
5368
5369 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5370 {
5371         struct tg3 *tp = tnapi->tp;
5372
5373         /* run TX completion thread */
5374         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5375                 tg3_tx(tnapi);
5376                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5377                         return work_done;
5378         }
5379
5380         /* run RX thread, within the bounds set by NAPI.
5381          * All RX "locking" is done by ensuring outside
5382          * code synchronizes with tg3->napi.poll()
5383          */
5384         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5385                 work_done += tg3_rx(tnapi, budget - work_done);
5386
5387         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5388                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5389                 int i, err = 0;
5390                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5391                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5392
5393                 for (i = 1; i < tp->irq_cnt; i++)
5394                         err |= tg3_rx_prodring_xfer(tp, dpr,
5395                                                     &tp->napi[i].prodring);
5396
5397                 wmb();
5398
5399                 if (std_prod_idx != dpr->rx_std_prod_idx)
5400                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5401                                      dpr->rx_std_prod_idx);
5402
5403                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5404                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5405                                      dpr->rx_jmb_prod_idx);
5406
5407                 mmiowb();
5408
5409                 if (err)
5410                         tw32_f(HOSTCC_MODE, tp->coal_now);
5411         }
5412
5413         return work_done;
5414 }
5415
5416 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5417 {
5418         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5419         struct tg3 *tp = tnapi->tp;
5420         int work_done = 0;
5421         struct tg3_hw_status *sblk = tnapi->hw_status;
5422
5423         while (1) {
5424                 work_done = tg3_poll_work(tnapi, work_done, budget);
5425
5426                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5427                         goto tx_recovery;
5428
5429                 if (unlikely(work_done >= budget))
5430                         break;
5431
5432                 /* tnapi->last_tag is used in the interrupt re-enable
5433                  * write below to tell the hw how much work has been
5434                  * processed, so we must read it before checking for more work.
5435                  */
5436                 tnapi->last_tag = sblk->status_tag;
5437                 tnapi->last_irq_tag = tnapi->last_tag;
5438                 rmb();
5439
5440                 /* check for RX/TX work to do */
5441                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5442                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5443                         napi_complete(napi);
5444                         /* Reenable interrupts. */
5445                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5446                         mmiowb();
5447                         break;
5448                 }
5449         }
5450
5451         return work_done;
5452
5453 tx_recovery:
5454         /* work_done is guaranteed to be less than budget. */
5455         napi_complete(napi);
5456         schedule_work(&tp->reset_task);
5457         return work_done;
5458 }
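
/* The interrupt re-enable write above places the last processed
 * status tag in bits 31:24 of the mailbox value.  For example
 * (hypothetical tag), last_tag == 0x5a yields a write of 0x5a000000.
 * With tagged status the chip compares the written tag against the
 * current status tag and raises a fresh interrupt if they differ,
 * so work that arrived during polling is not lost.
 */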
5459
5460 static void tg3_process_error(struct tg3 *tp)
5461 {
5462         u32 val;
5463         bool real_error = false;
5464
5465         if (tg3_flag(tp, ERROR_PROCESSED))
5466                 return;
5467
5468         /* Check Flow Attention register */
5469         val = tr32(HOSTCC_FLOW_ATTN);
5470         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5471                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5472                 real_error = true;
5473         }
5474
5475         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5476                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5477                 real_error = true;
5478         }
5479
5480         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5481                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5482                 real_error = true;
5483         }
5484
5485         if (!real_error)
5486                 return;
5487
5488         tg3_dump_state(tp);
5489
5490         tg3_flag_set(tp, ERROR_PROCESSED);
5491         schedule_work(&tp->reset_task);
5492 }
5493
5494 static int tg3_poll(struct napi_struct *napi, int budget)
5495 {
5496         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5497         struct tg3 *tp = tnapi->tp;
5498         int work_done = 0;
5499         struct tg3_hw_status *sblk = tnapi->hw_status;
5500
5501         while (1) {
5502                 if (sblk->status & SD_STATUS_ERROR)
5503                         tg3_process_error(tp);
5504
5505                 tg3_poll_link(tp);
5506
5507                 work_done = tg3_poll_work(tnapi, work_done, budget);
5508
5509                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5510                         goto tx_recovery;
5511
5512                 if (unlikely(work_done >= budget))
5513                         break;
5514
5515                 if (tg3_flag(tp, TAGGED_STATUS)) {
5516                         /* tnapi->last_tag is used in tg3_int_reenable() below
5517                          * to tell the hw how much work has been processed,
5518                          * so we must read it before checking for more work.
5519                          */
5520                         tnapi->last_tag = sblk->status_tag;
5521                         tnapi->last_irq_tag = tnapi->last_tag;
5522                         rmb();
5523                 } else
5524                         sblk->status &= ~SD_STATUS_UPDATED;
5525
5526                 if (likely(!tg3_has_work(tnapi))) {
5527                         napi_complete(napi);
5528                         tg3_int_reenable(tnapi);
5529                         break;
5530                 }
5531         }
5532
5533         return work_done;
5534
5535 tx_recovery:
5536         /* work_done is guaranteed to be less than budget. */
5537         napi_complete(napi);
5538         schedule_work(&tp->reset_task);
5539         return work_done;
5540 }
5541
5542 static void tg3_napi_disable(struct tg3 *tp)
5543 {
5544         int i;
5545
5546         for (i = tp->irq_cnt - 1; i >= 0; i--)
5547                 napi_disable(&tp->napi[i].napi);
5548 }
5549
5550 static void tg3_napi_enable(struct tg3 *tp)
5551 {
5552         int i;
5553
5554         for (i = 0; i < tp->irq_cnt; i++)
5555                 napi_enable(&tp->napi[i].napi);
5556 }
5557
5558 static void tg3_napi_init(struct tg3 *tp)
5559 {
5560         int i;
5561
5562         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5563         for (i = 1; i < tp->irq_cnt; i++)
5564                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5565 }
5566
5567 static void tg3_napi_fini(struct tg3 *tp)
5568 {
5569         int i;
5570
5571         for (i = 0; i < tp->irq_cnt; i++)
5572                 netif_napi_del(&tp->napi[i].napi);
5573 }
5574
5575 static inline void tg3_netif_stop(struct tg3 *tp)
5576 {
5577         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5578         tg3_napi_disable(tp);
5579         netif_tx_disable(tp->dev);
5580 }
5581
5582 static inline void tg3_netif_start(struct tg3 *tp)
5583 {
5584         /* NOTE: unconditional netif_tx_wake_all_queues is only
5585          * appropriate so long as all callers are assured to
5586          * have free tx slots (such as after tg3_init_hw)
5587          */
5588         netif_tx_wake_all_queues(tp->dev);
5589
5590         tg3_napi_enable(tp);
5591         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5592         tg3_enable_ints(tp);
5593 }
5594
5595 static void tg3_irq_quiesce(struct tg3 *tp)
5596 {
5597         int i;
5598
5599         BUG_ON(tp->irq_sync);
5600
5601         tp->irq_sync = 1;
5602         smp_mb();
5603
5604         for (i = 0; i < tp->irq_cnt; i++)
5605                 synchronize_irq(tp->napi[i].irq_vec);
5606 }
5607
5608 /* Fully shut down all tg3 driver activity elsewhere in the system.
5609  * If irq_sync is non-zero, the hardware IRQ handlers are synchronized
5610  * with as well.  Most of the time this is only necessary when shutting
5611  * down the device.
5612  */
5613 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5614 {
5615         spin_lock_bh(&tp->lock);
5616         if (irq_sync)
5617                 tg3_irq_quiesce(tp);
5618 }
5619
5620 static inline void tg3_full_unlock(struct tg3 *tp)
5621 {
5622         spin_unlock_bh(&tp->lock);
5623 }
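/* Illustrative locking sketch (mirrors tg3_change_mtu() below; not a
 * standalone code path):
 *
 *	tg3_full_lock(tp, 1);            // also quiesces the IRQ handlers
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	... reprogram the device ...
 *	err = tg3_restart_hw(tp, 0);
 *	tg3_full_unlock(tp);
 */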
5624
5625 /* One-shot MSI handler - the chip automatically disables the
5626  * interrupt after sending the MSI, so the driver doesn't have to.
5627  */
5628 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5629 {
5630         struct tg3_napi *tnapi = dev_id;
5631         struct tg3 *tp = tnapi->tp;
5632
5633         prefetch(tnapi->hw_status);
5634         if (tnapi->rx_rcb)
5635                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5636
5637         if (likely(!tg3_irq_sync(tp)))
5638                 napi_schedule(&tnapi->napi);
5639
5640         return IRQ_HANDLED;
5641 }
5642
5643 /* MSI ISR - No need to check for interrupt sharing and no need to
5644  * flush the status block and interrupt mailbox.  PCI ordering rules
5645  * guarantee that the MSI will arrive after the status block.
5646  */
5647 static irqreturn_t tg3_msi(int irq, void *dev_id)
5648 {
5649         struct tg3_napi *tnapi = dev_id;
5650         struct tg3 *tp = tnapi->tp;
5651
5652         prefetch(tnapi->hw_status);
5653         if (tnapi->rx_rcb)
5654                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5655         /*
5656          * Writing any value to intr-mbox-0 clears PCI INTA# and
5657          * chip-internal interrupt pending events.
5658          * Writing non-zero to intr-mbox-0 additionally tells the
5659          * NIC to stop sending us IRQs, engaging "in-intr-handler"
5660          * event coalescing.
5661          */
5662         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5663         if (likely(!tg3_irq_sync(tp)))
5664                 napi_schedule(&tnapi->napi);
5665
5666         return IRQ_RETVAL(1);
5667 }
5668
5669 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5670 {
5671         struct tg3_napi *tnapi = dev_id;
5672         struct tg3 *tp = tnapi->tp;
5673         struct tg3_hw_status *sblk = tnapi->hw_status;
5674         unsigned int handled = 1;
5675
5676         /* In INTx mode, the interrupt can arrive at the CPU before the
5677          * status block write posted prior to the interrupt has landed.
5678          * Reading the PCI State register will confirm whether the
5679          * interrupt is ours and will flush the status block.
5680          */
5681         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5682                 if (tg3_flag(tp, CHIP_RESETTING) ||
5683                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5684                         handled = 0;
5685                         goto out;
5686                 }
5687         }
5688
5689         /*
5690          * Writing any value to intr-mbox-0 clears PCI INTA# and
5691          * chip-internal interrupt pending events.
5692          * Writing non-zero to intr-mbox-0 additionally tells the
5693          * NIC to stop sending us IRQs, engaging "in-intr-handler"
5694          * event coalescing.
5695          *
5696          * Flush the mailbox to de-assert the IRQ immediately to prevent
5697          * spurious interrupts.  The flush impacts performance but
5698          * excessive spurious interrupts can be worse in some cases.
5699          */
5700         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5701         if (tg3_irq_sync(tp))
5702                 goto out;
5703         sblk->status &= ~SD_STATUS_UPDATED;
5704         if (likely(tg3_has_work(tnapi))) {
5705                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5706                 napi_schedule(&tnapi->napi);
5707         } else {
5708                 /* No work; a shared interrupt, perhaps?  Re-enable
5709                  * interrupts and flush that PCI write.
5710                  */
5711                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5712                                0x00000000);
5713         }
5714 out:
5715         return IRQ_RETVAL(handled);
5716 }
5717
5718 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5719 {
5720         struct tg3_napi *tnapi = dev_id;
5721         struct tg3 *tp = tnapi->tp;
5722         struct tg3_hw_status *sblk = tnapi->hw_status;
5723         unsigned int handled = 1;
5724
5725         /* In INTx mode, the interrupt can arrive at the CPU before the
5726          * status block write posted prior to the interrupt has landed.
5727          * Reading the PCI State register will confirm whether the
5728          * interrupt is ours and will flush the status block.
5729          */
5730         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5731                 if (tg3_flag(tp, CHIP_RESETTING) ||
5732                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5733                         handled = 0;
5734                         goto out;
5735                 }
5736         }
5737
5738         /*
5739          * Writing any value to intr-mbox-0 clears PCI INTA# and
5740          * chip-internal interrupt pending events.
5741          * Writing non-zero to intr-mbox-0 additionally tells the
5742          * NIC to stop sending us IRQs, engaging "in-intr-handler"
5743          * event coalescing.
5744          *
5745          * Flush the mailbox to de-assert the IRQ immediately to prevent
5746          * spurious interrupts.  The flush impacts performance but
5747          * excessive spurious interrupts can be worse in some cases.
5748          */
5749         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5750
5751         /*
5752          * In a shared interrupt configuration, sometimes other devices'
5753          * interrupts will scream.  We record the current status tag here
5754          * so that the above check can report that the screaming interrupts
5755          * are unhandled.  Eventually they will be silenced.
5756          */
5757         tnapi->last_irq_tag = sblk->status_tag;
5758
5759         if (tg3_irq_sync(tp))
5760                 goto out;
5761
5762         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5763
5764         napi_schedule(&tnapi->napi);
5765
5766 out:
5767         return IRQ_RETVAL(handled);
5768 }
5769
5770 /* ISR for interrupt test */
5771 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5772 {
5773         struct tg3_napi *tnapi = dev_id;
5774         struct tg3 *tp = tnapi->tp;
5775         struct tg3_hw_status *sblk = tnapi->hw_status;
5776
5777         if ((sblk->status & SD_STATUS_UPDATED) ||
5778             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5779                 tg3_disable_ints(tp);
5780                 return IRQ_RETVAL(1);
5781         }
5782         return IRQ_RETVAL(0);
5783 }
5784
5785 static int tg3_init_hw(struct tg3 *, int);
5786 static int tg3_halt(struct tg3 *, int, int);
5787
5788 /* Restart hardware after configuration changes, self-test, etc.
5789  * Invoked with tp->lock held.
5790  */
5791 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5792         __releases(tp->lock)
5793         __acquires(tp->lock)
5794 {
5795         int err;
5796
5797         err = tg3_init_hw(tp, reset_phy);
5798         if (err) {
5799                 netdev_err(tp->dev,
5800                            "Failed to re-initialize device, aborting\n");
5801                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5802                 tg3_full_unlock(tp);
5803                 del_timer_sync(&tp->timer);
5804                 tp->irq_sync = 0;
5805                 tg3_napi_enable(tp);
5806                 dev_close(tp->dev);
5807                 tg3_full_lock(tp, 0);
5808         }
5809         return err;
5810 }
5811
5812 #ifdef CONFIG_NET_POLL_CONTROLLER
5813 static void tg3_poll_controller(struct net_device *dev)
5814 {
5815         int i;
5816         struct tg3 *tp = netdev_priv(dev);
5817
5818         for (i = 0; i < tp->irq_cnt; i++)
5819                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5820 }
5821 #endif
5822
5823 static void tg3_reset_task(struct work_struct *work)
5824 {
5825         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5826         int err;
5827         unsigned int restart_timer;
5828
5829         tg3_full_lock(tp, 0);
5830
5831         if (!netif_running(tp->dev)) {
5832                 tg3_full_unlock(tp);
5833                 return;
5834         }
5835
5836         tg3_full_unlock(tp);
5837
5838         tg3_phy_stop(tp);
5839
5840         tg3_netif_stop(tp);
5841
5842         tg3_full_lock(tp, 1);
5843
5844         restart_timer = tg3_flag(tp, RESTART_TIMER);
5845         tg3_flag_clear(tp, RESTART_TIMER);
5846
5847         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5848                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5849                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5850                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5851                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5852         }
5853
5854         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5855         err = tg3_init_hw(tp, 1);
5856         if (err)
5857                 goto out;
5858
5859         tg3_netif_start(tp);
5860
5861         if (restart_timer)
5862                 mod_timer(&tp->timer, jiffies + 1);
5863
5864 out:
5865         tg3_full_unlock(tp);
5866
5867         if (!err)
5868                 tg3_phy_start(tp);
5869 }
5870
5871 static void tg3_tx_timeout(struct net_device *dev)
5872 {
5873         struct tg3 *tp = netdev_priv(dev);
5874
5875         if (netif_msg_tx_err(tp)) {
5876                 netdev_err(dev, "transmit timed out, resetting\n");
5877                 tg3_dump_state(tp);
5878         }
5879
5880         schedule_work(&tp->reset_task);
5881 }
5882
5883 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
5884 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5885 {
5886         u32 base = (u32) mapping & 0xffffffff;
5887
5888         return (base > 0xffffdcc0) && (base + len + 8 < base);
5889 }
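/* The second clause detects 32-bit wraparound: if base + len + 8
 * overflows a u32, the buffer straddles a 4GB boundary.  The first
 * clause is a cheap pre-filter: 0xffffdcc0 is 4GB - 9024, so only
 * mappings that start within ~9KB of a boundary -- presumably sized
 * for the largest jumbo frame plus slack -- can wrap at all.
 */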
5890
5891 /* Test for DMA addresses > 40-bit */
5892 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5893                                           int len)
5894 {
5895 #if defined(CONFIG_HIGHMEM) || (BITS_PER_LONG == 64)
5896         if (tg3_flag(tp, 40BIT_DMA_BUG))
5897                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5898         return 0;
5899 #else
5900         return 0;
5901 #endif
5902 }
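/* Either HIGHMEM (e.g. PAE) or a 64-bit kernel can hand out DMA
 * addresses above 32 bits; on a 32-bit kernel without HIGHMEM the
 * 40-bit limit can never be exceeded, so the test compiles away.
 */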
5903
5904 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5905                         dma_addr_t mapping, int len, u32 flags,
5906                         u32 mss_and_is_end)
5907 {
5908         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5909         int is_end = (mss_and_is_end & 0x1);
5910         u32 mss = (mss_and_is_end >> 1);
5911         u32 vlan_tag = 0;
5912
5913         if (is_end)
5914                 flags |= TXD_FLAG_END;
5915         if (flags & TXD_FLAG_VLAN) {
5916                 vlan_tag = flags >> 16;
5917                 flags &= 0xffff;
5918         }
5919         vlan_tag |= (mss << TXD_MSS_SHIFT);
5920
5921         txd->addr_hi = ((u64) mapping >> 32);
5922         txd->addr_lo = ((u64) mapping & 0xffffffff);
5923         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5924         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5925 }
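/* mss_and_is_end packs two values: bit 0 is the is-end flag and bits
 * 31:1 carry the MSS, hence callers pass (i == last) | (mss << 1).
 */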
5926
5927 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5928                                 struct sk_buff *skb, int last)
5929 {
5930         int i;
5931         u32 entry = tnapi->tx_prod;
5932         struct ring_info *txb = &tnapi->tx_buffers[entry];
5933
5934         pci_unmap_single(tnapi->tp->pdev,
5935                          dma_unmap_addr(txb, mapping),
5936                          skb_headlen(skb),
5937                          PCI_DMA_TODEVICE);
5938         for (i = 0; i < last; i++) {
5939                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5940
5941                 entry = NEXT_TX(entry);
5942                 txb = &tnapi->tx_buffers[entry];
5943
5944                 pci_unmap_page(tnapi->tp->pdev,
5945                                dma_unmap_addr(txb, mapping),
5946                                frag->size, PCI_DMA_TODEVICE);
5947         }
5948 }
5949
5950 /* Work around the 4GB and 40-bit hardware DMA bugs. */
5951 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5952                                        struct sk_buff *skb,
5953                                        u32 base_flags, u32 mss)
5954 {
5955         struct tg3 *tp = tnapi->tp;
5956         struct sk_buff *new_skb;
5957         dma_addr_t new_addr = 0;
5958         u32 entry = tnapi->tx_prod;
5959         int ret = 0;
5960
5961         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5962                 new_skb = skb_copy(skb, GFP_ATOMIC);
5963         else {
5964                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5965
5966                 new_skb = skb_copy_expand(skb,
5967                                           skb_headroom(skb) + more_headroom,
5968                                           skb_tailroom(skb), GFP_ATOMIC);
5969         }
5970
5971         if (!new_skb) {
5972                 ret = -1;
5973         } else {
5974                 /* New SKB is guaranteed to be linear. */
5975                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5976                                           PCI_DMA_TODEVICE);
5977                 /* Make sure the mapping succeeded */
5978                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5979                         ret = -1;
5980                         dev_kfree_skb(new_skb);
5981
5982                 /* Make sure new skb does not cross any 4G boundaries.
5983                  * Drop the packet if it does.
5984                  */
5985                 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5986                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5987                                          PCI_DMA_TODEVICE);
5988                         ret = -1;
5989                         dev_kfree_skb(new_skb);
5990                 } else {
5991                         tnapi->tx_buffers[entry].skb = new_skb;
5992                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5993                                            mapping, new_addr);
5994
5995                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5996                                     base_flags, 1 | (mss << 1));
5997                 }
5998         }
5999
6000         dev_kfree_skb(skb);
6001
6002         return ret;
6003 }
6004
6005 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6006
6007 /* Use GSO to work around a rare TSO bug that may be triggered when the
6008  * TSO header is greater than 80 bytes.
6009  */
6010 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6011 {
6012         struct sk_buff *segs, *nskb;
6013         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6014
6015         /* Estimate the number of fragments in the worst case */
6016         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6017                 netif_stop_queue(tp->dev);
6018
6019                 /* netif_tx_stop_queue() must be done before checking
6020                  * the tx index in tg3_tx_avail() below, because in
6021                  * tg3_tx(), we update the tx index before checking for
6022                  * netif_tx_queue_stopped().
6023                  */
6024                 smp_mb();
6025                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6026                         return NETDEV_TX_BUSY;
6027
6028                 netif_wake_queue(tp->dev);
6029         }
6030
6031         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6032         if (IS_ERR(segs))
6033                 goto tg3_tso_bug_end;
6034
6035         do {
6036                 nskb = segs;
6037                 segs = segs->next;
6038                 nskb->next = NULL;
6039                 tg3_start_xmit(nskb, tp->dev);
6040         } while (segs);
6041
6042 tg3_tso_bug_end:
6043         dev_kfree_skb(skb);
6044
6045         return NETDEV_TX_OK;
6046 }
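/* skb_gso_segment() does the TSO split in software, yielding a chain
 * of MTU-sized skbs; each segment is then fed back through
 * tg3_start_xmit() as an ordinary, non-TSO packet.
 */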
6047
6048 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6049  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6050  */
6051 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6052 {
6053         struct tg3 *tp = netdev_priv(dev);
6054         u32 len, entry, base_flags, mss;
6055         int i = -1, would_hit_hwbug;
6056         dma_addr_t mapping;
6057         struct tg3_napi *tnapi;
6058         struct netdev_queue *txq;
6059         unsigned int last;
6060
6061         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6062         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6063         if (tg3_flag(tp, ENABLE_TSS))
6064                 tnapi++;
6065
6066         /* We are running in BH disabled context with netif_tx_lock
6067          * and TX reclaim runs via tp->napi.poll inside of a software
6068          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6069          * no IRQ context deadlocks to worry about either.  Rejoice!
6070          */
6071         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6072                 if (!netif_tx_queue_stopped(txq)) {
6073                         netif_tx_stop_queue(txq);
6074
6075                         /* This is a hard error, log it. */
6076                         netdev_err(dev,
6077                                    "BUG! Tx Ring full when queue awake!\n");
6078                 }
6079                 return NETDEV_TX_BUSY;
6080         }
6081
6082         entry = tnapi->tx_prod;
6083         base_flags = 0;
6084         if (skb->ip_summed == CHECKSUM_PARTIAL)
6085                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6086
6087         mss = skb_shinfo(skb)->gso_size;
6088         if (mss) {
6089                 struct iphdr *iph;
6090                 u32 tcp_opt_len, hdr_len;
6091
6092                 if (skb_header_cloned(skb) &&
6093                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6094                         dev_kfree_skb(skb);
6095                         goto out_unlock;
6096                 }
6097
6098                 iph = ip_hdr(skb);
6099                 tcp_opt_len = tcp_optlen(skb);
6100
6101                 if (skb_is_gso_v6(skb)) {
6102                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6103                 } else {
6104                         u32 ip_tcp_len;
6105
6106                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6107                         hdr_len = ip_tcp_len + tcp_opt_len;
6108
6109                         iph->check = 0;
6110                         iph->tot_len = htons(mss + hdr_len);
6111                 }
6112
6113                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6114                     tg3_flag(tp, TSO_BUG))
6115                         return tg3_tso_bug(tp, skb);
6116
6117                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6118                                TXD_FLAG_CPU_POST_DMA);
6119
6120                 if (tg3_flag(tp, HW_TSO_1) ||
6121                     tg3_flag(tp, HW_TSO_2) ||
6122                     tg3_flag(tp, HW_TSO_3)) {
6123                         tcp_hdr(skb)->check = 0;
6124                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6125                 } else
6126                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6127                                                                  iph->daddr, 0,
6128                                                                  IPPROTO_TCP,
6129                                                                  0);
6130
6131                 if (tg3_flag(tp, HW_TSO_3)) {
6132                         mss |= (hdr_len & 0xc) << 12;
6133                         if (hdr_len & 0x10)
6134                                 base_flags |= 0x00000010;
6135                         base_flags |= (hdr_len & 0x3e0) << 5;
6136                 } else if (tg3_flag(tp, HW_TSO_2))
6137                         mss |= hdr_len << 9;
6138                 else if (tg3_flag(tp, HW_TSO_1) ||
6139                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6140                         if (tcp_opt_len || iph->ihl > 5) {
6141                                 int tsflags;
6142
6143                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6144                                 mss |= (tsflags << 11);
6145                         }
6146                 } else {
6147                         if (tcp_opt_len || iph->ihl > 5) {
6148                                 int tsflags;
6149
6150                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6151                                 base_flags |= tsflags << 12;
6152                         }
6153                 }
6154         }
6155
6156         if (vlan_tx_tag_present(skb))
6157                 base_flags |= (TXD_FLAG_VLAN |
6158                                (vlan_tx_tag_get(skb) << 16));
6159
6160         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6161             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6162                 base_flags |= TXD_FLAG_JMB_PKT;
6163
6164         len = skb_headlen(skb);
6165
6166         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6167         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6168                 dev_kfree_skb(skb);
6169                 goto out_unlock;
6170         }
6171
6172         tnapi->tx_buffers[entry].skb = skb;
6173         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6174
6175         would_hit_hwbug = 0;
6176
6177         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6178                 would_hit_hwbug = 1;
6179
6180         if (tg3_4g_overflow_test(mapping, len))
6181                 would_hit_hwbug = 1;
6182
6183         if (tg3_40bit_overflow_test(tp, mapping, len))
6184                 would_hit_hwbug = 1;
6185
6186         if (tg3_flag(tp, 5701_DMA_BUG))
6187                 would_hit_hwbug = 1;
6188
6189         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6190                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6191
6192         entry = NEXT_TX(entry);
6193
6194         /* Now loop through additional data fragments, and queue them. */
6195         if (skb_shinfo(skb)->nr_frags > 0) {
6196                 last = skb_shinfo(skb)->nr_frags - 1;
6197                 for (i = 0; i <= last; i++) {
6198                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6199
6200                         len = frag->size;
6201                         mapping = pci_map_page(tp->pdev,
6202                                                frag->page,
6203                                                frag->page_offset,
6204                                                len, PCI_DMA_TODEVICE);
6205
6206                         tnapi->tx_buffers[entry].skb = NULL;
6207                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6208                                            mapping);
6209                         if (pci_dma_mapping_error(tp->pdev, mapping))
6210                                 goto dma_error;
6211
6212                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6213                             len <= 8)
6214                                 would_hit_hwbug = 1;
6215
6216                         if (tg3_4g_overflow_test(mapping, len))
6217                                 would_hit_hwbug = 1;
6218
6219                         if (tg3_40bit_overflow_test(tp, mapping, len))
6220                                 would_hit_hwbug = 1;
6221
6222                         if (tg3_flag(tp, HW_TSO_1) ||
6223                             tg3_flag(tp, HW_TSO_2) ||
6224                             tg3_flag(tp, HW_TSO_3))
6225                                 tg3_set_txd(tnapi, entry, mapping, len,
6226                                             base_flags, (i == last)|(mss << 1));
6227                         else
6228                                 tg3_set_txd(tnapi, entry, mapping, len,
6229                                             base_flags, (i == last));
6230
6231                         entry = NEXT_TX(entry);
6232                 }
6233         }
6234
6235         if (would_hit_hwbug) {
6236                 tg3_skb_error_unmap(tnapi, skb, i);
6237
6238                 /* If the workaround fails due to memory/mapping
6239                  * failure, silently drop this packet.
6240                  */
6241                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6242                         goto out_unlock;
6243
6244                 entry = NEXT_TX(tnapi->tx_prod);
6245         }
6246
6247         skb_tx_timestamp(skb);
6248
6249         /* Packets are ready, update Tx producer idx local and on card. */
6250         tw32_tx_mbox(tnapi->prodmbox, entry);
6251
6252         tnapi->tx_prod = entry;
6253         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6254                 netif_tx_stop_queue(txq);
6255
6256                 /* netif_tx_stop_queue() must be done before checking
6257                  * the tx index in tg3_tx_avail() below, because in
6258                  * tg3_tx(), we update the tx index before checking for
6259                  * netif_tx_queue_stopped().
6260                  */
6261                 smp_mb();
6262                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6263                         netif_tx_wake_queue(txq);
6264         }
6265
6266 out_unlock:
6267         mmiowb();
6268
6269         return NETDEV_TX_OK;
6270
6271 dma_error:
6272         tg3_skb_error_unmap(tnapi, skb, i);
6273         dev_kfree_skb(skb);
6274         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6275         return NETDEV_TX_OK;
6276 }
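/* If any mapping above trips a DMA bug test, would_hit_hwbug causes
 * the whole frame to be unwound and resubmitted as a single linear
 * copy via tigon3_dma_hwbug_workaround().
 */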
6277
6278 static void tg3_set_loopback(struct net_device *dev, u32 features)
6279 {
6280         struct tg3 *tp = netdev_priv(dev);
6281
6282         if (features & NETIF_F_LOOPBACK) {
6283                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6284                         return;
6285
6286                 /*
6287                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6288                  * loopback mode if Half-Duplex mode was negotiated earlier.
6289                  */
6290                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6291
6292                 /* Enable internal MAC loopback mode */
6293                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6294                 spin_lock_bh(&tp->lock);
6295                 tw32(MAC_MODE, tp->mac_mode);
6296                 netif_carrier_on(tp->dev);
6297                 spin_unlock_bh(&tp->lock);
6298                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6299         } else {
6300                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6301                         return;
6302
6303                 /* Disable internal MAC loopback mode */
6304                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6305                 spin_lock_bh(&tp->lock);
6306                 tw32(MAC_MODE, tp->mac_mode);
6307                 /* Force link status check */
6308                 tg3_setup_phy(tp, 1);
6309                 spin_unlock_bh(&tp->lock);
6310                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6311         }
6312 }
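/* Reached via the ndo_set_features path whenever NETIF_F_LOOPBACK
 * changes; with a new enough ethtool this is toggled by something
 * like "ethtool -K ethX loopback on" (the exact feature string
 * depends on the ethtool version).
 */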
6313
6314 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6315 {
6316         struct tg3 *tp = netdev_priv(dev);
6317
6318         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6319                 features &= ~NETIF_F_ALL_TSO;
6320
6321         return features;
6322 }
6323
6324 static int tg3_set_features(struct net_device *dev, u32 features)
6325 {
6326         u32 changed = dev->features ^ features;
6327
6328         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6329                 tg3_set_loopback(dev, features);
6330
6331         return 0;
6332 }
6333
6334 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6335                                int new_mtu)
6336 {
6337         dev->mtu = new_mtu;
6338
6339         if (new_mtu > ETH_DATA_LEN) {
6340                 if (tg3_flag(tp, 5780_CLASS)) {
6341                         netdev_update_features(dev);
6342                         tg3_flag_clear(tp, TSO_CAPABLE);
6343                 } else {
6344                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6345                 }
6346         } else {
6347                 if (tg3_flag(tp, 5780_CLASS)) {
6348                         tg3_flag_set(tp, TSO_CAPABLE);
6349                         netdev_update_features(dev);
6350                 }
6351                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6352         }
6353 }
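/* On 5780-class parts jumbo frames and TSO are mutually exclusive:
 * TSO_CAPABLE tracks the MTU, and netdev_update_features() re-runs
 * tg3_fix_features() above to mask the TSO feature bits off.
 */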
6354
6355 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6356 {
6357         struct tg3 *tp = netdev_priv(dev);
6358         int err;
6359
6360         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6361                 return -EINVAL;
6362
6363         if (!netif_running(dev)) {
6364                 /* We'll just catch it later when the
6365                  * device is brought up.
6366                  */
6367                 tg3_set_mtu(dev, tp, new_mtu);
6368                 return 0;
6369         }
6370
6371         tg3_phy_stop(tp);
6372
6373         tg3_netif_stop(tp);
6374
6375         tg3_full_lock(tp, 1);
6376
6377         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6378
6379         tg3_set_mtu(dev, tp, new_mtu);
6380
6381         err = tg3_restart_hw(tp, 0);
6382
6383         if (!err)
6384                 tg3_netif_start(tp);
6385
6386         tg3_full_unlock(tp);
6387
6388         if (!err)
6389                 tg3_phy_start(tp);
6390
6391         return err;
6392 }
6393
6394 static void tg3_rx_prodring_free(struct tg3 *tp,
6395                                  struct tg3_rx_prodring_set *tpr)
6396 {
6397         int i;
6398
6399         if (tpr != &tp->napi[0].prodring) {
6400                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6401                      i = (i + 1) & tp->rx_std_ring_mask)
6402                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6403                                         tp->rx_pkt_map_sz);
6404
6405                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6406                         for (i = tpr->rx_jmb_cons_idx;
6407                              i != tpr->rx_jmb_prod_idx;
6408                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6409                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6410                                                 TG3_RX_JMB_MAP_SZ);
6411                         }
6412                 }
6413
6414                 return;
6415         }
6416
6417         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6418                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6419                                 tp->rx_pkt_map_sz);
6420
6421         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6422                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6423                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6424                                         TG3_RX_JMB_MAP_SZ);
6425         }
6426 }
6427
6428 /* Initialize rx rings for packet processing.
6429  *
6430  * The chip has been shut down and the driver detached from
6431  * the network stack, so no interrupts or new tx packets will
6432  * end up in the driver.  tp->{tx,}lock are held and thus
6433  * we may not sleep.
6434  */
6435 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6436                                  struct tg3_rx_prodring_set *tpr)
6437 {
6438         u32 i, rx_pkt_dma_sz;
6439
6440         tpr->rx_std_cons_idx = 0;
6441         tpr->rx_std_prod_idx = 0;
6442         tpr->rx_jmb_cons_idx = 0;
6443         tpr->rx_jmb_prod_idx = 0;
6444
6445         if (tpr != &tp->napi[0].prodring) {
6446                 memset(&tpr->rx_std_buffers[0], 0,
6447                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6448                 if (tpr->rx_jmb_buffers)
6449                         memset(&tpr->rx_jmb_buffers[0], 0,
6450                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6451                 goto done;
6452         }
6453
6454         /* Zero out all descriptors. */
6455         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6456
6457         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6458         if (tg3_flag(tp, 5780_CLASS) &&
6459             tp->dev->mtu > ETH_DATA_LEN)
6460                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6461         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6462
6463         /* Initialize invariants of the rings; we only set this
6464          * stuff once.  This works because the card does not
6465          * write into the rx buffer posting rings.
6466          */
6467         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6468                 struct tg3_rx_buffer_desc *rxd;
6469
6470                 rxd = &tpr->rx_std[i];
6471                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6472                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6473                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6474                                (i << RXD_OPAQUE_INDEX_SHIFT));
6475         }
6476
6477         /* Now allocate fresh SKBs for each rx ring. */
6478         for (i = 0; i < tp->rx_pending; i++) {
6479                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6480                         netdev_warn(tp->dev,
6481                                     "Using a smaller RX standard ring. Only "
6482                                     "%d out of %d buffers were allocated "
6483                                     "successfully\n", i, tp->rx_pending);
6484                         if (i == 0)
6485                                 goto initfail;
6486                         tp->rx_pending = i;
6487                         break;
6488                 }
6489         }
6490
6491         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6492                 goto done;
6493
6494         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6495
6496         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6497                 goto done;
6498
6499         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6500                 struct tg3_rx_buffer_desc *rxd;
6501
6502                 rxd = &tpr->rx_jmb[i].std;
6503                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6504                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6505                                   RXD_FLAG_JUMBO;
6506                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6507                        (i << RXD_OPAQUE_INDEX_SHIFT));
6508         }
6509
6510         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6511                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6512                         netdev_warn(tp->dev,
6513                                     "Using a smaller RX jumbo ring. Only %d "
6514                                     "out of %d buffers were allocated "
6515                                     "successfully\n", i, tp->rx_jumbo_pending);
6516                         if (i == 0)
6517                                 goto initfail;
6518                         tp->rx_jumbo_pending = i;
6519                         break;
6520                 }
6521         }
6522
6523 done:
6524         return 0;
6525
6526 initfail:
6527         tg3_rx_prodring_free(tp, tpr);
6528         return -ENOMEM;
6529 }
6530
6531 static void tg3_rx_prodring_fini(struct tg3 *tp,
6532                                  struct tg3_rx_prodring_set *tpr)
6533 {
6534         kfree(tpr->rx_std_buffers);
6535         tpr->rx_std_buffers = NULL;
6536         kfree(tpr->rx_jmb_buffers);
6537         tpr->rx_jmb_buffers = NULL;
6538         if (tpr->rx_std) {
6539                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6540                                   tpr->rx_std, tpr->rx_std_mapping);
6541                 tpr->rx_std = NULL;
6542         }
6543         if (tpr->rx_jmb) {
6544                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6545                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6546                 tpr->rx_jmb = NULL;
6547         }
6548 }
6549
6550 static int tg3_rx_prodring_init(struct tg3 *tp,
6551                                 struct tg3_rx_prodring_set *tpr)
6552 {
6553         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6554                                       GFP_KERNEL);
6555         if (!tpr->rx_std_buffers)
6556                 return -ENOMEM;
6557
6558         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6559                                          TG3_RX_STD_RING_BYTES(tp),
6560                                          &tpr->rx_std_mapping,
6561                                          GFP_KERNEL);
6562         if (!tpr->rx_std)
6563                 goto err_out;
6564
6565         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6566                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6567                                               GFP_KERNEL);
6568                 if (!tpr->rx_jmb_buffers)
6569                         goto err_out;
6570
6571                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6572                                                  TG3_RX_JMB_RING_BYTES(tp),
6573                                                  &tpr->rx_jmb_mapping,
6574                                                  GFP_KERNEL);
6575                 if (!tpr->rx_jmb)
6576                         goto err_out;
6577         }
6578
6579         return 0;
6580
6581 err_out:
6582         tg3_rx_prodring_fini(tp, tpr);
6583         return -ENOMEM;
6584 }
6585
6586 /* Free up pending packets in all rx/tx rings.
6587  *
6588  * The chip has been shut down and the driver detached from
6589  * the network stack, so no interrupts or new tx packets will
6590  * end up in the driver.  tp->{tx,}lock is not held and we are not
6591  * in an interrupt context and thus may sleep.
6592  */
6593 static void tg3_free_rings(struct tg3 *tp)
6594 {
6595         int i, j;
6596
6597         for (j = 0; j < tp->irq_cnt; j++) {
6598                 struct tg3_napi *tnapi = &tp->napi[j];
6599
6600                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6601
6602                 if (!tnapi->tx_buffers)
6603                         continue;
6604
6605                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6606                         struct ring_info *txp;
6607                         struct sk_buff *skb;
6608                         unsigned int k;
6609
6610                         txp = &tnapi->tx_buffers[i];
6611                         skb = txp->skb;
6612
6613                         if (skb == NULL) {
6614                                 i++;
6615                                 continue;
6616                         }
6617
6618                         pci_unmap_single(tp->pdev,
6619                                          dma_unmap_addr(txp, mapping),
6620                                          skb_headlen(skb),
6621                                          PCI_DMA_TODEVICE);
6622                         txp->skb = NULL;
6623
6624                         i++;
6625
6626                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6627                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6628                                 pci_unmap_page(tp->pdev,
6629                                                dma_unmap_addr(txp, mapping),
6630                                                skb_shinfo(skb)->frags[k].size,
6631                                                PCI_DMA_TODEVICE);
6632                                 i++;
6633                         }
6634
6635                         dev_kfree_skb_any(skb);
6636                 }
6637         }
6638 }
6639
6640 /* Initialize tx/rx rings for packet processing.
6641  *
6642  * The chip has been shut down and the driver detached from
6643  * the network stack, so no interrupts or new tx packets will
6644  * end up in the driver.  tp->{tx,}lock are held and thus
6645  * we may not sleep.
6646  */
6647 static int tg3_init_rings(struct tg3 *tp)
6648 {
6649         int i;
6650
6651         /* Free up all the SKBs. */
6652         tg3_free_rings(tp);
6653
6654         for (i = 0; i < tp->irq_cnt; i++) {
6655                 struct tg3_napi *tnapi = &tp->napi[i];
6656
6657                 tnapi->last_tag = 0;
6658                 tnapi->last_irq_tag = 0;
6659                 tnapi->hw_status->status = 0;
6660                 tnapi->hw_status->status_tag = 0;
6661                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6662
6663                 tnapi->tx_prod = 0;
6664                 tnapi->tx_cons = 0;
6665                 if (tnapi->tx_ring)
6666                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6667
6668                 tnapi->rx_rcb_ptr = 0;
6669                 if (tnapi->rx_rcb)
6670                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6671
6672                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6673                         tg3_free_rings(tp);
6674                         return -ENOMEM;
6675                 }
6676         }
6677
6678         return 0;
6679 }
6680
6681 /*
6682  * Must not be invoked with interrupt sources disabled and
6683  * the hardware shut down.
6684  */
6685 static void tg3_free_consistent(struct tg3 *tp)
6686 {
6687         int i;
6688
6689         for (i = 0; i < tp->irq_cnt; i++) {
6690                 struct tg3_napi *tnapi = &tp->napi[i];
6691
6692                 if (tnapi->tx_ring) {
6693                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6694                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6695                         tnapi->tx_ring = NULL;
6696                 }
6697
6698                 kfree(tnapi->tx_buffers);
6699                 tnapi->tx_buffers = NULL;
6700
6701                 if (tnapi->rx_rcb) {
6702                         dma_free_coherent(&tp->pdev->dev,
6703                                           TG3_RX_RCB_RING_BYTES(tp),
6704                                           tnapi->rx_rcb,
6705                                           tnapi->rx_rcb_mapping);
6706                         tnapi->rx_rcb = NULL;
6707                 }
6708
6709                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6710
6711                 if (tnapi->hw_status) {
6712                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6713                                           tnapi->hw_status,
6714                                           tnapi->status_mapping);
6715                         tnapi->hw_status = NULL;
6716                 }
6717         }
6718
6719         if (tp->hw_stats) {
6720                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6721                                   tp->hw_stats, tp->stats_mapping);
6722                 tp->hw_stats = NULL;
6723         }
6724 }
6725
6726 /*
6727  * Must not be invoked with interrupt sources disabled and
6728  * the hardware shut down.  Can sleep.
6729  */
6730 static int tg3_alloc_consistent(struct tg3 *tp)
6731 {
6732         int i;
6733
6734         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6735                                           sizeof(struct tg3_hw_stats),
6736                                           &tp->stats_mapping,
6737                                           GFP_KERNEL);
6738         if (!tp->hw_stats)
6739                 goto err_out;
6740
6741         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6742
6743         for (i = 0; i < tp->irq_cnt; i++) {
6744                 struct tg3_napi *tnapi = &tp->napi[i];
6745                 struct tg3_hw_status *sblk;
6746
6747                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6748                                                       TG3_HW_STATUS_SIZE,
6749                                                       &tnapi->status_mapping,
6750                                                       GFP_KERNEL);
6751                 if (!tnapi->hw_status)
6752                         goto err_out;
6753
6754                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6755                 sblk = tnapi->hw_status;
6756
6757                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6758                         goto err_out;
6759
6760                 /* If multivector TSS is enabled, vector 0 does not handle
6761                  * tx interrupts.  Don't allocate any resources for it.
6762                  */
6763                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6764                     (i && tg3_flag(tp, ENABLE_TSS))) {
6765                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6766                                                     TG3_TX_RING_SIZE,
6767                                                     GFP_KERNEL);
6768                         if (!tnapi->tx_buffers)
6769                                 goto err_out;
6770
6771                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6772                                                             TG3_TX_RING_BYTES,
6773                                                         &tnapi->tx_desc_mapping,
6774                                                             GFP_KERNEL);
6775                         if (!tnapi->tx_ring)
6776                                 goto err_out;
6777                 }
6778
6779                 /*
6780                  * When RSS is enabled, the status block format changes
6781                  * slightly.  The "rx_jumbo_consumer", "reserved",
6782                  * and "rx_mini_consumer" members get mapped to the
6783                  * other three rx return ring producer indexes.
6784                  */
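                /* The default case (vector 0 without RSS, or vector 1)
                 * keeps the normal idx[0].rx_producer slot; vectors 2,
                 * 3 and 4 use the three borrowed fields, in order.
                 */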
6785                 switch (i) {
6786                 default:
6787                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6788                         break;
6789                 case 2:
6790                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6791                         break;
6792                 case 3:
6793                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6794                         break;
6795                 case 4:
6796                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6797                         break;
6798                 }
6799
6800                 /*
6801                  * If multivector RSS is enabled, vector 0 does not handle
6802                  * rx or tx interrupts.  Don't allocate any resources for it.
6803                  */
6804                 if (!i && tg3_flag(tp, ENABLE_RSS))
6805                         continue;
6806
6807                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6808                                                    TG3_RX_RCB_RING_BYTES(tp),
6809                                                    &tnapi->rx_rcb_mapping,
6810                                                    GFP_KERNEL);
6811                 if (!tnapi->rx_rcb)
6812                         goto err_out;
6813
6814                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6815         }
6816
6817         return 0;
6818
6819 err_out:
6820         tg3_free_consistent(tp);
6821         return -ENOMEM;
6822 }
6823
6824 #define MAX_WAIT_CNT 1000
6825
6826 /* To stop a block, clear the enable bit and poll till it
6827  * clears.  tp->lock is held.
6828  */
6829 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6830 {
6831         unsigned int i;
6832         u32 val;
6833
6834         if (tg3_flag(tp, 5705_PLUS)) {
6835                 switch (ofs) {
6836                 case RCVLSC_MODE:
6837                 case DMAC_MODE:
6838                 case MBFREE_MODE:
6839                 case BUFMGR_MODE:
6840                 case MEMARB_MODE:
6841                         /* We can't enable/disable these bits of the
6842                          * 5705/5750, just say success.
6843                          */
6844                         return 0;
6845
6846                 default:
6847                         break;
6848                 }
6849         }
6850
6851         val = tr32(ofs);
6852         val &= ~enable_bit;
6853         tw32_f(ofs, val);
6854
6855         for (i = 0; i < MAX_WAIT_CNT; i++) {
6856                 udelay(100);
6857                 val = tr32(ofs);
6858                 if ((val & enable_bit) == 0)
6859                         break;
6860         }
6861
6862         if (i == MAX_WAIT_CNT && !silent) {
6863                 dev_err(&tp->pdev->dev,
6864                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6865                         ofs, enable_bit);
6866                 return -ENODEV;
6867         }
6868
6869         return 0;
6870 }
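/* The poll loop above bounds the wait at MAX_WAIT_CNT * 100us, i.e.
 * 100ms, before declaring the block stuck.
 */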
6871
6872 /* tp->lock is held. */
6873 static int tg3_abort_hw(struct tg3 *tp, int silent)
6874 {
6875         int i, err;
6876
6877         tg3_disable_ints(tp);
6878
6879         tp->rx_mode &= ~RX_MODE_ENABLE;
6880         tw32_f(MAC_RX_MODE, tp->rx_mode);
6881         udelay(10);
6882
6883         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6884         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6885         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6886         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6887         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6888         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6889
6890         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6891         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6892         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6893         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6894         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6895         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6896         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6897
6898         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6899         tw32_f(MAC_MODE, tp->mac_mode);
6900         udelay(40);
6901
6902         tp->tx_mode &= ~TX_MODE_ENABLE;
6903         tw32_f(MAC_TX_MODE, tp->tx_mode);
6904
6905         for (i = 0; i < MAX_WAIT_CNT; i++) {
6906                 udelay(100);
6907                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6908                         break;
6909         }
6910         if (i >= MAX_WAIT_CNT) {
6911                 dev_err(&tp->pdev->dev,
6912                         "%s timed out, TX_MODE_ENABLE will not clear "
6913                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6914                 err |= -ENODEV;
6915         }
6916
6917         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6918         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6919         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6920
6921         tw32(FTQ_RESET, 0xffffffff);
6922         tw32(FTQ_RESET, 0x00000000);
6923
6924         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6925         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6926
6927         for (i = 0; i < tp->irq_cnt; i++) {
6928                 struct tg3_napi *tnapi = &tp->napi[i];
6929                 if (tnapi->hw_status)
6930                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6931         }
6932         if (tp->hw_stats)
6933                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6934
6935         return err;
6936 }
6937
6938 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6939 {
6940         int i;
6941         u32 apedata;
6942
6943         /* NCSI does not support APE events */
6944         if (tg3_flag(tp, APE_HAS_NCSI))
6945                 return;
6946
6947         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6948         if (apedata != APE_SEG_SIG_MAGIC)
6949                 return;
6950
6951         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6952         if (!(apedata & APE_FW_STATUS_READY))
6953                 return;
6954
6955         /* Wait for up to 1 millisecond for APE to service previous event. */
6956         for (i = 0; i < 10; i++) {
6957                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6958                         return;
6959
6960                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6961
6962                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6963                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6964                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6965
6966                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6967
6968                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6969                         break;
6970
6971                 udelay(100);
6972         }
6973
6974         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6975                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6976 }
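/* The loop above is 10 polls of 100us each, matching the advertised
 * one-millisecond budget for the APE to finish the previous event.
 */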
6977
6978 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6979 {
6980         u32 event;
6981         u32 apedata;
6982
6983         if (!tg3_flag(tp, ENABLE_APE))
6984                 return;
6985
6986         switch (kind) {
6987         case RESET_KIND_INIT:
6988                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6989                                 APE_HOST_SEG_SIG_MAGIC);
6990                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6991                                 APE_HOST_SEG_LEN_MAGIC);
6992                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6993                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6994                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6995                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6996                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6997                                 APE_HOST_BEHAV_NO_PHYLOCK);
6998                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6999                                     TG3_APE_HOST_DRVR_STATE_START);
7000
7001                 event = APE_EVENT_STATUS_STATE_START;
7002                 break;
7003         case RESET_KIND_SHUTDOWN:
7004                 /* With the interface we are currently using,
7005                  * APE does not track driver state.  Wiping
7006                  * out the HOST SEGMENT SIGNATURE forces
7007                  * the APE to assume the OS is absent.
7008                  */
7009                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
7010
7011                 if (device_may_wakeup(&tp->pdev->dev) &&
7012                     tg3_flag(tp, WOL_ENABLE)) {
7013                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7014                                             TG3_APE_HOST_WOL_SPEED_AUTO);
7015                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7016                 } else
7017                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7018
7019                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7020
7021                 event = APE_EVENT_STATUS_STATE_UNLOAD;
7022                 break;
7023         case RESET_KIND_SUSPEND:
7024                 event = APE_EVENT_STATUS_STATE_SUSPEND;
7025                 break;
7026         default:
7027                 return;
7028         }
7029
7030         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7031
7032         tg3_ape_send_event(tp, event);
7033 }
7034
7035 /* tp->lock is held. */
7036 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7037 {
7038         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7039                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7040
7041         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7042                 switch (kind) {
7043                 case RESET_KIND_INIT:
7044                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7045                                       DRV_STATE_START);
7046                         break;
7047
7048                 case RESET_KIND_SHUTDOWN:
7049                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7050                                       DRV_STATE_UNLOAD);
7051                         break;
7052
7053                 case RESET_KIND_SUSPEND:
7054                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7055                                       DRV_STATE_SUSPEND);
7056                         break;
7057
7058                 default:
7059                         break;
7060                 }
7061         }
7062
7063         if (kind == RESET_KIND_INIT ||
7064             kind == RESET_KIND_SUSPEND)
7065                 tg3_ape_driver_state_change(tp, kind);
7066 }
7067
7068 /* tp->lock is held. */
7069 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7070 {
7071         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7072                 switch (kind) {
7073                 case RESET_KIND_INIT:
7074                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7075                                       DRV_STATE_START_DONE);
7076                         break;
7077
7078                 case RESET_KIND_SHUTDOWN:
7079                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7080                                       DRV_STATE_UNLOAD_DONE);
7081                         break;
7082
7083                 default:
7084                         break;
7085                 }
7086         }
7087
7088         if (kind == RESET_KIND_SHUTDOWN)
7089                 tg3_ape_driver_state_change(tp, kind);
7090 }
7091
7092 /* tp->lock is held. */
7093 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7094 {
7095         if (tg3_flag(tp, ENABLE_ASF)) {
7096                 switch (kind) {
7097                 case RESET_KIND_INIT:
7098                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7099                                       DRV_STATE_START);
7100                         break;
7101
7102                 case RESET_KIND_SHUTDOWN:
7103                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7104                                       DRV_STATE_UNLOAD);
7105                         break;
7106
7107                 case RESET_KIND_SUSPEND:
7108                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7109                                       DRV_STATE_SUSPEND);
7110                         break;
7111
7112                 default:
7113                         break;
7114                 }
7115         }
7116 }
7117
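/* Wait for the bootcode to finish initializing after a reset.  Returns
 * -ENODEV only on a 5906 VCPU timeout; other chips may legitimately run
 * without firmware (e.g. some Sun onboard parts), so a firmware mailbox
 * timeout there is reported once but not treated as an error.
 */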
7118 static int tg3_poll_fw(struct tg3 *tp)
7119 {
7120         int i;
7121         u32 val;
7122
7123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7124                 /* Wait up to 20ms for init done. */
7125                 for (i = 0; i < 200; i++) {
7126                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7127                                 return 0;
7128                         udelay(100);
7129                 }
7130                 return -ENODEV;
7131         }
7132
7133         /* Wait for firmware initialization to complete. */
7134         for (i = 0; i < 100000; i++) {
7135                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7136                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7137                         break;
7138                 udelay(10);
7139         }
7140
7141         /* Chip might not be fitted with firmware.  Some Sun onboard
7142          * parts are configured like that.  So don't signal the timeout
7143          * of the above loop as an error, but do report the lack of
7144          * running firmware once.
7145          */
7146         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7147                 tg3_flag_set(tp, NO_FWARE_REPORTED);
7148
7149                 netdev_info(tp->dev, "No firmware running\n");
7150         }
7151
7152         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7153                 /* The 57765 A0 needs a little more
7154                  * time to do some important work.
7155                  */
7156                 mdelay(10);
7157         }
7158
7159         return 0;
7160 }
7161
7162 /* Save PCI command register before chip reset */
7163 static void tg3_save_pci_state(struct tg3 *tp)
7164 {
7165         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7166 }
7167
7168 /* Restore PCI state after chip reset */
7169 static void tg3_restore_pci_state(struct tg3 *tp)
7170 {
7171         u32 val;
7172
7173         /* Re-enable indirect register accesses. */
7174         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7175                                tp->misc_host_ctrl);
7176
7177         /* Set MAX PCI retry to zero. */
7178         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7179         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7180             tg3_flag(tp, PCIX_MODE))
7181                 val |= PCISTATE_RETRY_SAME_DMA;
7182         /* Allow reads and writes to the APE register and memory space. */
7183         if (tg3_flag(tp, ENABLE_APE))
7184                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7185                        PCISTATE_ALLOW_APE_SHMEM_WR |
7186                        PCISTATE_ALLOW_APE_PSPACE_WR;
7187         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7188
7189         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7190
7191         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7192                 if (tg3_flag(tp, PCI_EXPRESS))
7193                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7194                 else {
7195                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7196                                               tp->pci_cacheline_sz);
7197                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7198                                               tp->pci_lat_timer);
7199                 }
7200         }
7201
7202         /* Make sure PCI-X relaxed ordering bit is clear. */
7203         if (tg3_flag(tp, PCIX_MODE)) {
7204                 u16 pcix_cmd;
7205
7206                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7207                                      &pcix_cmd);
7208                 pcix_cmd &= ~PCI_X_CMD_ERO;
7209                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7210                                       pcix_cmd);
7211         }
7212
7213         if (tg3_flag(tp, 5780_CLASS)) {
7214
7215                 /* Chip reset on 5780 will reset the MSI enable bit,
7216                  * so we need to restore it.
7217                  */
7218                 if (tg3_flag(tp, USING_MSI)) {
7219                         u16 ctrl;
7220
7221                         pci_read_config_word(tp->pdev,
7222                                              tp->msi_cap + PCI_MSI_FLAGS,
7223                                              &ctrl);
7224                         pci_write_config_word(tp->pdev,
7225                                               tp->msi_cap + PCI_MSI_FLAGS,
7226                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7227                         val = tr32(MSGINT_MODE);
7228                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7229                 }
7230         }
7231 }
7232
7233 static void tg3_stop_fw(struct tg3 *);
7234
7235 /* tp->lock is held. */
7236 static int tg3_chip_reset(struct tg3 *tp)
7237 {
7238         u32 val;
7239         void (*write_op)(struct tg3 *, u32, u32);
7240         int i, err;
7241
7242         tg3_nvram_lock(tp);
7243
7244         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7245
7246         /* No matching tg3_nvram_unlock() after this because
7247          * chip reset below will undo the nvram lock.
7248          */
7249         tp->nvram_lock_cnt = 0;
7250
7251         /* GRC_MISC_CFG core clock reset will clear the memory
7252          * enable bit in PCI register 4 and the MSI enable bit
7253          * on some chips, so we save relevant registers here.
7254          */
7255         tg3_save_pci_state(tp);
7256
7257         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7258             tg3_flag(tp, 5755_PLUS))
7259                 tw32(GRC_FASTBOOT_PC, 0);
7260
7261         /*
7262          * We must avoid the readl() that normally takes place.
7263          * It locks machines, causes machine checks, and other
7264          * fun things.  So, temporarily disable the 5701
7265          * hardware workaround while we do the reset.
7266          */
7267         write_op = tp->write32;
7268         if (write_op == tg3_write_flush_reg32)
7269                 tp->write32 = tg3_write32;
7270
7271         /* Prevent the irq handler from reading or writing PCI registers
7272          * during chip reset when the memory enable bit in the PCI command
7273          * register may be cleared.  The chip does not generate interrupt
7274          * at this time, but the irq handler may still be called due to irq
7275          * sharing or irqpoll.
7276          */
7277         tg3_flag_set(tp, CHIP_RESETTING);
7278         for (i = 0; i < tp->irq_cnt; i++) {
7279                 struct tg3_napi *tnapi = &tp->napi[i];
7280                 if (tnapi->hw_status) {
7281                         tnapi->hw_status->status = 0;
7282                         tnapi->hw_status->status_tag = 0;
7283                 }
7284                 tnapi->last_tag = 0;
7285                 tnapi->last_irq_tag = 0;
7286         }
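        /* Ensure the zeroed status blocks and tags above are visible to
         * a concurrently running interrupt handler before synchronizing
         * with it below.
         */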
7287         smp_mb();
7288
7289         for (i = 0; i < tp->irq_cnt; i++)
7290                 synchronize_irq(tp->napi[i].irq_vec);
7291
7292         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7293                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7294                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7295         }
7296
7297         /* do the reset */
7298         val = GRC_MISC_CFG_CORECLK_RESET;
7299
7300         if (tg3_flag(tp, PCI_EXPRESS)) {
7301                 /* Force PCIe 1.0a mode */
7302                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7303                     !tg3_flag(tp, 57765_PLUS) &&
7304                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7305                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7306                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7307
7308                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7309                         tw32(GRC_MISC_CFG, (1 << 29));
7310                         val |= (1 << 29);
7311                 }
7312         }
7313
7314         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7315                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7316                 tw32(GRC_VCPU_EXT_CTRL,
7317                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7318         }
7319
7320         /* Manage gphy power for all CPMU absent PCIe devices. */
7321         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7322                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7323
7324         tw32(GRC_MISC_CFG, val);
7325
7326         /* restore 5701 hardware bug workaround write method */
7327         tp->write32 = write_op;
7328
7329         /* Unfortunately, we have to delay before the PCI read back.
7330          * Some 575X chips will not even respond to a PCI cfg access
7331          * when the reset command is given to the chip.
7332          *
7333          * How do these hardware designers expect things to work
7334          * properly if the PCI write is posted for a long period
7335          * of time?  It is always necessary to have some method by
7336          * which a register read back can occur to push the write
7337          * out which does the reset.
7338          *
7339          * For most tg3 variants the trick below has worked.
7340          * Ho hum...
7341          */
7342         udelay(120);
7343
7344         /* Flush PCI posted writes.  The normal MMIO registers
7345          * are inaccessible at this time so this is the only
7346          * way to do this reliably (actually, this is no longer
7347          * the case, see above).  I tried to use indirect
7348          * register read/write but this upset some 5701 variants.
7349          */
7350         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7351
7352         udelay(120);
7353
7354         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7355                 u16 val16;
7356
7357                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7358                         int i;
7359                         u32 cfg_val;
7360
7361                         /* Wait for link training to complete.  */
7362                         for (i = 0; i < 5000; i++)
7363                                 udelay(100);
7364
7365                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7366                         pci_write_config_dword(tp->pdev, 0xc4,
7367                                                cfg_val | (1 << 15));
7368                 }
7369
7370                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7371                 pci_read_config_word(tp->pdev,
7372                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7373                                      &val16);
7374                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7375                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7376                 /*
7377                  * Older PCIe devices only support the 128 byte
7378                  * MPS setting.  Enforce the restriction.
7379                  */
7380                 if (!tg3_flag(tp, CPMU_PRESENT))
7381                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7382                 pci_write_config_word(tp->pdev,
7383                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7384                                       val16);
7385
7386                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7387
7388                 /* Clear error status */
7389                 pci_write_config_word(tp->pdev,
7390                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7391                                       PCI_EXP_DEVSTA_CED |
7392                                       PCI_EXP_DEVSTA_NFED |
7393                                       PCI_EXP_DEVSTA_FED |
7394                                       PCI_EXP_DEVSTA_URD);
7395         }
7396
7397         tg3_restore_pci_state(tp);
7398
7399         tg3_flag_clear(tp, CHIP_RESETTING);
7400         tg3_flag_clear(tp, ERROR_PROCESSED);
7401
7402         val = 0;
7403         if (tg3_flag(tp, 5780_CLASS))
7404                 val = tr32(MEMARB_MODE);
7405         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7406
7407         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7408                 tg3_stop_fw(tp);
7409                 tw32(0x5000, 0x400);
7410         }
7411
7412         tw32(GRC_MODE, tp->grc_mode);
7413
7414         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7415                 val = tr32(0xc4);
7416
7417                 tw32(0xc4, val | (1 << 15));
7418         }
7419
7420         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7421             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7422                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7423                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7424                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7425                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7426         }
7427
7428         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7429                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7430                 val = tp->mac_mode;
7431         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7432                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7433                 val = tp->mac_mode;
7434         } else
7435                 val = 0;
7436
7437         tw32_f(MAC_MODE, val);
7438         udelay(40);
7439
7440         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7441
7442         err = tg3_poll_fw(tp);
7443         if (err)
7444                 return err;
7445
7446         tg3_mdio_start(tp);
7447
7448         if (tg3_flag(tp, PCI_EXPRESS) &&
7449             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7450             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7451             !tg3_flag(tp, 57765_PLUS)) {
7452                 val = tr32(0x7c00);
7453
7454                 tw32(0x7c00, val | (1 << 25));
7455         }
7456
7457         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7458                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7459                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7460         }
7461
7462         /* Reprobe ASF enable state.  */
7463         tg3_flag_clear(tp, ENABLE_ASF);
7464         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7465         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7466         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7467                 u32 nic_cfg;
7468
7469                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7470                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7471                         tg3_flag_set(tp, ENABLE_ASF);
7472                         tp->last_event_jiffies = jiffies;
7473                         if (tg3_flag(tp, 5750_PLUS))
7474                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7475                 }
7476         }
7477
7478         return 0;
7479 }
7480
7481 /* tp->lock is held. */
7482 static void tg3_stop_fw(struct tg3 *tp)
7483 {
7484         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7485                 /* Wait for RX cpu to ACK the previous event. */
7486                 tg3_wait_for_event_ack(tp);
7487
7488                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7489
7490                 tg3_generate_fw_event(tp);
7491
7492                 /* Wait for RX cpu to ACK this event. */
7493                 tg3_wait_for_event_ack(tp);
7494         }
7495 }
7496
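/* Stop firmware activity, record the reset kind in the firmware
 * mailboxes, abort any pending hardware activity, reset the chip, and
 * restore the MAC address.
 */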
7497 /* tp->lock is held. */
7498 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7499 {
7500         int err;
7501
7502         tg3_stop_fw(tp);
7503
7504         tg3_write_sig_pre_reset(tp, kind);
7505
7506         tg3_abort_hw(tp, silent);
7507         err = tg3_chip_reset(tp);
7508
7509         __tg3_set_mac_addr(tp, 0);
7510
7511         tg3_write_sig_legacy(tp, kind);
7512         tg3_write_sig_post_reset(tp, kind);
7513
7514         if (err)
7515                 return err;
7516
7517         return 0;
7518 }
7519
7520 #define RX_CPU_SCRATCH_BASE     0x30000
7521 #define RX_CPU_SCRATCH_SIZE     0x04000
7522 #define TX_CPU_SCRATCH_BASE     0x34000
7523 #define TX_CPU_SCRATCH_SIZE     0x04000
7524
7525 /* tp->lock is held. */
7526 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7527 {
7528         int i;
7529
7530         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7531
7532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7533                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7534
7535                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7536                 return 0;
7537         }
7538         if (offset == RX_CPU_BASE) {
7539                 for (i = 0; i < 10000; i++) {
7540                         tw32(offset + CPU_STATE, 0xffffffff);
7541                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7542                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7543                                 break;
7544                 }
7545
7546                 tw32(offset + CPU_STATE, 0xffffffff);
7547                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7548                 udelay(10);
7549         } else {
7550                 for (i = 0; i < 10000; i++) {
7551                         tw32(offset + CPU_STATE, 0xffffffff);
7552                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7553                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7554                                 break;
7555                 }
7556         }
7557
7558         if (i >= 10000) {
7559                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7560                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7561                 return -ENODEV;
7562         }
7563
7564         /* Clear firmware's nvram arbitration. */
7565         if (tg3_flag(tp, NVRAM))
7566                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7567         return 0;
7568 }
7569
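/* Describes a firmware image destined for one of the on-chip CPUs.  As
 * parsed from the raw blob by the loaders below: word 0 holds version
 * information, word 1 the load/entry address (fw_base), word 2 the
 * length, and the remaining big-endian words are the image itself
 * (fw_data); fw_len is the image length in bytes.
 */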
7570 struct fw_info {
7571         unsigned int fw_base;
7572         unsigned int fw_len;
7573         const __be32 *fw_data;
7574 };
7575
7576 /* tp->lock is held. */
7577 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7578                                  int cpu_scratch_size, struct fw_info *info)
7579 {
7580         int err, lock_err, i;
7581         void (*write_op)(struct tg3 *, u32, u32);
7582
7583         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7584                 netdev_err(tp->dev,
7585                            "%s: attempted to load TX cpu firmware on a 5705-class chip\n",
7586                            __func__);
7587                 return -EINVAL;
7588         }
7589
7590         if (tg3_flag(tp, 5705_PLUS))
7591                 write_op = tg3_write_mem;
7592         else
7593                 write_op = tg3_write_indirect_reg32;
7594
7595         /* It is possible that bootcode is still loading at this point.
7596          * Acquire the nvram lock before halting the cpu.
7597          */
7598         lock_err = tg3_nvram_lock(tp);
7599         err = tg3_halt_cpu(tp, cpu_base);
7600         if (!lock_err)
7601                 tg3_nvram_unlock(tp);
7602         if (err)
7603                 goto out;
7604
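        /* Zero the scratch area, halt the CPU, and copy the image in
         * 32-bit words.  Only the low 16 bits of fw_base are used as
         * the offset within the scratch window.
         */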
7605         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7606                 write_op(tp, cpu_scratch_base + i, 0);
7607         tw32(cpu_base + CPU_STATE, 0xffffffff);
7608         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
7609         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7610                 write_op(tp, (cpu_scratch_base +
7611                               (info->fw_base & 0xffff) +
7612                               (i * sizeof(u32))),
7613                               be32_to_cpu(info->fw_data[i]));
7614
7615         err = 0;
7616
7617 out:
7618         return err;
7619 }
7620
7621 /* tp->lock is held. */
7622 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7623 {
7624         struct fw_info info;
7625         const __be32 *fw_data;
7626         int err, i;
7627
7628         fw_data = (void *)tp->fw->data;
7629
7630         /* The firmware blob starts with version numbers, followed by
7631          * the start address and length.  We use the complete length:
7632          * length = end_address_of_bss - start_address_of_text.  The
7633          * remainder is the image, to be loaded contiguously from the
7634          * start address. */
7635
7636         info.fw_base = be32_to_cpu(fw_data[1]);
7637         info.fw_len = tp->fw->size - 12;
7638         info.fw_data = &fw_data[3];
7639
7640         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7641                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7642                                     &info);
7643         if (err)
7644                 return err;
7645
7646         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7647                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7648                                     &info);
7649         if (err)
7650                 return err;
7651
7652         /* Now start up only the RX cpu. */
7653         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7654         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7655
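        /* Verify the CPU latched the new program counter, retrying the
         * write up to five times with the CPU held in halt.
         */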
7656         for (i = 0; i < 5; i++) {
7657                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7658                         break;
7659                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7660                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7661                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7662                 udelay(1000);
7663         }
7664         if (i >= 5) {
7665                 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
7666                            "should be %08x\n", __func__,
7667                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7668                 return -ENODEV;
7669         }
7670         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7671         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7672
7673         return 0;
7674 }
7675
7676 /* tp->lock is held. */
7677 static int tg3_load_tso_firmware(struct tg3 *tp)
7678 {
7679         struct fw_info info;
7680         const __be32 *fw_data;
7681         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7682         int err, i;
7683
7684         if (tg3_flag(tp, HW_TSO_1) ||
7685             tg3_flag(tp, HW_TSO_2) ||
7686             tg3_flag(tp, HW_TSO_3))
7687                 return 0;
7688
7689         fw_data = (void *)tp->fw->data;
7690
7691         /* The firmware blob starts with version numbers, followed by
7692          * the start address and length.  We use the complete length:
7693          * length = end_address_of_bss - start_address_of_text.  The
7694          * remainder is the image, to be loaded contiguously from the
7695          * start address. */
7696
7697         info.fw_base = be32_to_cpu(fw_data[1]);
7698         cpu_scratch_size = tp->fw_len;
7699         info.fw_len = tp->fw->size - 12;
7700         info.fw_data = &fw_data[3];
7701
7702         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7703                 cpu_base = RX_CPU_BASE;
7704                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7705         } else {
7706                 cpu_base = TX_CPU_BASE;
7707                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7708                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7709         }
7710
7711         err = tg3_load_firmware_cpu(tp, cpu_base,
7712                                     cpu_scratch_base, cpu_scratch_size,
7713                                     &info);
7714         if (err)
7715                 return err;
7716
7717         /* Now start up the cpu. */
7718         tw32(cpu_base + CPU_STATE, 0xffffffff);
7719         tw32_f(cpu_base + CPU_PC, info.fw_base);
7720
7721         for (i = 0; i < 5; i++) {
7722                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7723                         break;
7724                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7725                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7726                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7727                 udelay(1000);
7728         }
7729         if (i >= 5) {
7730                 netdev_err(tp->dev,
7731                            "%s failed to set CPU PC, is %08x should be %08x\n",
7732                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7733                 return -ENODEV;
7734         }
7735         tw32(cpu_base + CPU_STATE, 0xffffffff);
7736         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7737         return 0;
7738 }
7739
7740
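/* Change the device MAC address; used as the driver's
 * .ndo_set_mac_address callback.  When ASF firmware appears to be using
 * MAC address register 1, that register is left untouched.
 */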
7741 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7742 {
7743         struct tg3 *tp = netdev_priv(dev);
7744         struct sockaddr *addr = p;
7745         int err = 0, skip_mac_1 = 0;
7746
7747         if (!is_valid_ether_addr(addr->sa_data))
7748                 return -EINVAL;
7749
7750         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7751
7752         if (!netif_running(dev))
7753                 return 0;
7754
7755         if (tg3_flag(tp, ENABLE_ASF)) {
7756                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7757
7758                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7759                 addr0_low = tr32(MAC_ADDR_0_LOW);
7760                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7761                 addr1_low = tr32(MAC_ADDR_1_LOW);
7762
7763                 /* Skip MAC addr 1 if ASF is using it. */
7764                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7765                     !(addr1_high == 0 && addr1_low == 0))
7766                         skip_mac_1 = 1;
7767         }
7768         spin_lock_bh(&tp->lock);
7769         __tg3_set_mac_addr(tp, skip_mac_1);
7770         spin_unlock_bh(&tp->lock);
7771
7772         return err;
7773 }
7774
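/* Program one TG3_BDINFO ring control block in NIC SRAM: the 64-bit
 * host DMA address, the maxlen/flags word, and (on chips before the
 * 5705) the ring's address in NIC memory.
 */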
7775 /* tp->lock is held. */
7776 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7777                            dma_addr_t mapping, u32 maxlen_flags,
7778                            u32 nic_addr)
7779 {
7780         tg3_write_mem(tp,
7781                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7782                       ((u64) mapping >> 32));
7783         tg3_write_mem(tp,
7784                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7785                       ((u64) mapping & 0xffffffff));
7786         tg3_write_mem(tp,
7787                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7788                        maxlen_flags);
7789
7790         if (!tg3_flag(tp, 5705_PLUS))
7791                 tg3_write_mem(tp,
7792                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7793                               nic_addr);
7794 }
7795
7796 static void __tg3_set_rx_mode(struct net_device *);
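
/* Apply ethtool coalescing parameters to the host coalescing engine.
 * With TSS/RSS enabled the default TX/RX registers are zeroed; the
 * additional interrupt vectors are programmed through the per-vector
 * register blocks below.
 */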
7797 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7798 {
7799         int i;
7800
7801         if (!tg3_flag(tp, ENABLE_TSS)) {
7802                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7803                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7804                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7805         } else {
7806                 tw32(HOSTCC_TXCOL_TICKS, 0);
7807                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7808                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7809         }
7810
7811         if (!tg3_flag(tp, ENABLE_RSS)) {
7812                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7813                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7814                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7815         } else {
7816                 tw32(HOSTCC_RXCOL_TICKS, 0);
7817                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7818                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7819         }
7820
7821         if (!tg3_flag(tp, 5705_PLUS)) {
7822                 u32 val = ec->stats_block_coalesce_usecs;
7823
7824                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7825                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7826
7827                 if (!netif_carrier_ok(tp->dev))
7828                         val = 0;
7829
7830                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7831         }
7832
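        /* Each additional interrupt vector has its own block of
         * coalescing registers, spaced 0x18 bytes apart starting at the
         * VEC1 offsets; vectors beyond those in use are zeroed below.
         */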
7833         for (i = 0; i < tp->irq_cnt - 1; i++) {
7834                 u32 reg;
7835
7836                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7837                 tw32(reg, ec->rx_coalesce_usecs);
7838                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7839                 tw32(reg, ec->rx_max_coalesced_frames);
7840                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7841                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7842
7843                 if (tg3_flag(tp, ENABLE_TSS)) {
7844                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7845                         tw32(reg, ec->tx_coalesce_usecs);
7846                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7847                         tw32(reg, ec->tx_max_coalesced_frames);
7848                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7849                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7850                 }
7851         }
7852
7853         for (; i < tp->irq_max - 1; i++) {
7854                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7855                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7856                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7857
7858                 if (tg3_flag(tp, ENABLE_TSS)) {
7859                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7860                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7861                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7862                 }
7863         }
7864 }
7865
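/* Return all rings to a known-empty state: disable every send and
 * receive-return ring beyond those in use, zero the mailboxes and
 * producer/consumer state, and reprogram the status block address and
 * BD info blocks for each interrupt vector.
 */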
7866 /* tp->lock is held. */
7867 static void tg3_rings_reset(struct tg3 *tp)
7868 {
7869         int i;
7870         u32 stblk, txrcb, rxrcb, limit;
7871         struct tg3_napi *tnapi = &tp->napi[0];
7872
7873         /* Disable all transmit rings but the first. */
7874         if (!tg3_flag(tp, 5705_PLUS))
7875                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7876         else if (tg3_flag(tp, 5717_PLUS))
7877                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7878         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7879                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7880         else
7881                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7882
7883         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7884              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7885                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7886                               BDINFO_FLAGS_DISABLED);
7887
7888
7889         /* Disable all receive return rings but the first. */
7890         if (tg3_flag(tp, 5717_PLUS))
7891                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7892         else if (!tg3_flag(tp, 5705_PLUS))
7893                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7894         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7895                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7896                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7897         else
7898                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7899
7900         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7901              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7902                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7903                               BDINFO_FLAGS_DISABLED);
7904
7905         /* Disable interrupts */
7906         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7907         tp->napi[0].chk_msi_cnt = 0;
7908         tp->napi[0].last_rx_cons = 0;
7909         tp->napi[0].last_tx_cons = 0;
7910
7911         /* Zero mailbox registers. */
7912         if (tg3_flag(tp, SUPPORT_MSIX)) {
7913                 for (i = 1; i < tp->irq_max; i++) {
7914                         tp->napi[i].tx_prod = 0;
7915                         tp->napi[i].tx_cons = 0;
7916                         if (tg3_flag(tp, ENABLE_TSS))
7917                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7918                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7919                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7920                         tp->napi[i].chk_msi_cnt = 0;
7921                         tp->napi[i].last_rx_cons = 0;
7922                         tp->napi[i].last_tx_cons = 0;
7923                 }
7924                 if (!tg3_flag(tp, ENABLE_TSS))
7925                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7926         } else {
7927                 tp->napi[0].tx_prod = 0;
7928                 tp->napi[0].tx_cons = 0;
7929                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7930                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7931         }
7932
7933         /* Make sure the NIC-based send BD rings are disabled. */
7934         if (!tg3_flag(tp, 5705_PLUS)) {
7935                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7936                 for (i = 0; i < 16; i++)
7937                         tw32_tx_mbox(mbox + i * 8, 0);
7938         }
7939
7940         txrcb = NIC_SRAM_SEND_RCB;
7941         rxrcb = NIC_SRAM_RCV_RET_RCB;
7942
7943         /* Clear status block in ram. */
7944         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7945
7946         /* Set status block DMA address */
7947         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7948              ((u64) tnapi->status_mapping >> 32));
7949         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7950              ((u64) tnapi->status_mapping & 0xffffffff));
7951
7952         if (tnapi->tx_ring) {
7953                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7954                                (TG3_TX_RING_SIZE <<
7955                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7956                                NIC_SRAM_TX_BUFFER_DESC);
7957                 txrcb += TG3_BDINFO_SIZE;
7958         }
7959
7960         if (tnapi->rx_rcb) {
7961                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7962                                (tp->rx_ret_ring_mask + 1) <<
7963                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7964                 rxrcb += TG3_BDINFO_SIZE;
7965         }
7966
7967         stblk = HOSTCC_STATBLCK_RING1;
7968
7969         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7970                 u64 mapping = (u64)tnapi->status_mapping;
7971                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7972                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7973
7974                 /* Clear status block in ram. */
7975                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7976
7977                 if (tnapi->tx_ring) {
7978                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7979                                        (TG3_TX_RING_SIZE <<
7980                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7981                                        NIC_SRAM_TX_BUFFER_DESC);
7982                         txrcb += TG3_BDINFO_SIZE;
7983                 }
7984
7985                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7986                                ((tp->rx_ret_ring_mask + 1) <<
7987                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7988
7989                 stblk += 8;
7990                 rxrcb += TG3_BDINFO_SIZE;
7991         }
7992 }
7993
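/* Program the RX buffer-descriptor replenish thresholds.  The standard
 * ring threshold is the smaller of half the per-chip BD cache size
 * (capped by rx_std_max_post) and one eighth of the configured ring
 * length; the jumbo ring is handled the same way when present.
 */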
7994 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7995 {
7996         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7997
7998         if (!tg3_flag(tp, 5750_PLUS) ||
7999             tg3_flag(tp, 5780_CLASS) ||
8000             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8001             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8002                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8003         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8004                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8005                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8006         else
8007                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8008
8009         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8010         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8011
8012         val = min(nic_rep_thresh, host_rep_thresh);
8013         tw32(RCVBDI_STD_THRESH, val);
8014
8015         if (tg3_flag(tp, 57765_PLUS))
8016                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8017
8018         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8019                 return;
8020
8021         if (!tg3_flag(tp, 5705_PLUS))
8022                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8023         else
8024                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8025
8026         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8027
8028         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8029         tw32(RCVBDI_JUMBO_THRESH, val);
8030
8031         if (tg3_flag(tp, 57765_PLUS))
8032                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8033 }
8034
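/* Bring the chip from reset to a fully operational state: quiesce and
 * reset the hardware, then reprogram clocks, DMA, the buffer manager,
 * all rings, and the MAC.  Optionally resets the PHY first.
 */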
8035 /* tp->lock is held. */
8036 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8037 {
8038         u32 val, rdmac_mode;
8039         int i, err, limit;
8040         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8041
8042         tg3_disable_ints(tp);
8043
8044         tg3_stop_fw(tp);
8045
8046         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8047
8048         if (tg3_flag(tp, INIT_COMPLETE))
8049                 tg3_abort_hw(tp, 1);
8050
8051         /* Enable MAC control of LPI */
8052         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8053                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8054                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8055                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8056
8057                 tw32_f(TG3_CPMU_EEE_CTRL,
8058                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8059
8060                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8061                       TG3_CPMU_EEEMD_LPI_IN_TX |
8062                       TG3_CPMU_EEEMD_LPI_IN_RX |
8063                       TG3_CPMU_EEEMD_EEE_ENABLE;
8064
8065                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8066                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8067
8068                 if (tg3_flag(tp, ENABLE_APE))
8069                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8070
8071                 tw32_f(TG3_CPMU_EEE_MODE, val);
8072
8073                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8074                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8075                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8076
8077                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8078                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8079                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8080         }
8081
8082         if (reset_phy)
8083                 tg3_phy_reset(tp);
8084
8085         err = tg3_chip_reset(tp);
8086         if (err)
8087                 return err;
8088
8089         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8090
8091         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8092                 val = tr32(TG3_CPMU_CTRL);
8093                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8094                 tw32(TG3_CPMU_CTRL, val);
8095
8096                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8097                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8098                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8099                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8100
8101                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8102                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8103                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8104                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8105
8106                 val = tr32(TG3_CPMU_HST_ACC);
8107                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8108                 val |= CPMU_HST_ACC_MACCLK_6_25;
8109                 tw32(TG3_CPMU_HST_ACC, val);
8110         }
8111
8112         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8113                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8114                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8115                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8116                 tw32(PCIE_PWR_MGMT_THRESH, val);
8117
8118                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8119                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8120
8121                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8122
8123                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8124                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8125         }
8126
8127         if (tg3_flag(tp, L1PLLPD_EN)) {
8128                 u32 grc_mode = tr32(GRC_MODE);
8129
8130                 /* Access the lower 1K of PL PCIE block registers. */
8131                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8132                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8133
8134                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8135                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8136                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8137
8138                 tw32(GRC_MODE, grc_mode);
8139         }
8140
8141         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8142                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8143                         u32 grc_mode = tr32(GRC_MODE);
8144
8145                         /* Access the lower 1K of PL PCIE block registers. */
8146                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8147                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8148
8149                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8150                                    TG3_PCIE_PL_LO_PHYCTL5);
8151                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8152                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8153
8154                         tw32(GRC_MODE, grc_mode);
8155                 }
8156
8157                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8158                         u32 grc_mode = tr32(GRC_MODE);
8159
8160                         /* Access the lower 1K of DL PCIE block registers. */
8161                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8162                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8163
8164                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8165                                    TG3_PCIE_DL_LO_FTSMAX);
8166                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8167                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8168                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8169
8170                         tw32(GRC_MODE, grc_mode);
8171                 }
8172
8173                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8174                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8175                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8176                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8177         }
8178
8179         /* This works around an issue with Athlon chipsets on
8180          * B3 tigon3 silicon.  This bit has no effect on any
8181          * other revision.  But do not set this on PCI Express
8182          * chips and don't even touch the clocks if the CPMU is present.
8183          */
8184         if (!tg3_flag(tp, CPMU_PRESENT)) {
8185                 if (!tg3_flag(tp, PCI_EXPRESS))
8186                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8187                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8188         }
8189
8190         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8191             tg3_flag(tp, PCIX_MODE)) {
8192                 val = tr32(TG3PCI_PCISTATE);
8193                 val |= PCISTATE_RETRY_SAME_DMA;
8194                 tw32(TG3PCI_PCISTATE, val);
8195         }
8196
8197         if (tg3_flag(tp, ENABLE_APE)) {
8198                 /* Allow reads and writes to the
8199                  * APE register and memory space.
8200                  */
8201                 val = tr32(TG3PCI_PCISTATE);
8202                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8203                        PCISTATE_ALLOW_APE_SHMEM_WR |
8204                        PCISTATE_ALLOW_APE_PSPACE_WR;
8205                 tw32(TG3PCI_PCISTATE, val);
8206         }
8207
8208         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8209                 /* Enable some hw fixes.  */
8210                 val = tr32(TG3PCI_MSI_DATA);
8211                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8212                 tw32(TG3PCI_MSI_DATA, val);
8213         }
8214
8215         /* Descriptor ring init may make accesses to the
8216          * NIC SRAM area to setup the TX descriptors, so we
8217          * can only do this after the hardware has been
8218          * successfully reset.
8219          */
8220         err = tg3_init_rings(tp);
8221         if (err)
8222                 return err;
8223
8224         if (tg3_flag(tp, 57765_PLUS)) {
8225                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8226                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8227                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8228                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8229                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8230                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8231                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8232                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8233         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8234                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8235                 /* This value is determined during the probe-time DMA
8236                  * engine test, tg3_test_dma.
8237                  */
8238                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8239         }
8240
8241         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8242                           GRC_MODE_4X_NIC_SEND_RINGS |
8243                           GRC_MODE_NO_TX_PHDR_CSUM |
8244                           GRC_MODE_NO_RX_PHDR_CSUM);
8245         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8246
8247         /* Pseudo-header checksum is done by hardware logic and not
8248          * the offload processors, so make the chip do the pseudo-
8249          * header checksums on receive.  For transmit it is more
8250          * convenient to do the pseudo-header checksum in software
8251          * as Linux does that on transmit for us in all cases.
8252          */
8253         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8254
8255         tw32(GRC_MODE,
8256              tp->grc_mode |
8257              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8258
8259         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8260         val = tr32(GRC_MISC_CFG);
8261         val &= ~0xff;
8262         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8263         tw32(GRC_MISC_CFG, val);
8264
8265         /* Initialize MBUF/DESC pool. */
8266         if (tg3_flag(tp, 5750_PLUS)) {
8267                 /* Do nothing.  */
8268         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8269                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8270                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8271                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8272                 else
8273                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8274                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8275                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8276         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8277                 int fw_len;
8278
8279                 fw_len = tp->fw_len;
8280                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8281                 tw32(BUFMGR_MB_POOL_ADDR,
8282                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8283                 tw32(BUFMGR_MB_POOL_SIZE,
8284                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8285         }
8286
8287         if (tp->dev->mtu <= ETH_DATA_LEN) {
8288                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8289                      tp->bufmgr_config.mbuf_read_dma_low_water);
8290                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8291                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8292                 tw32(BUFMGR_MB_HIGH_WATER,
8293                      tp->bufmgr_config.mbuf_high_water);
8294         } else {
8295                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8296                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8297                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8298                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8299                 tw32(BUFMGR_MB_HIGH_WATER,
8300                      tp->bufmgr_config.mbuf_high_water_jumbo);
8301         }
8302         tw32(BUFMGR_DMA_LOW_WATER,
8303              tp->bufmgr_config.dma_low_water);
8304         tw32(BUFMGR_DMA_HIGH_WATER,
8305              tp->bufmgr_config.dma_high_water);
8306
8307         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8308         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8309                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8310         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8311             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8312             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8313                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8314         tw32(BUFMGR_MODE, val);
8315         for (i = 0; i < 2000; i++) {
8316                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8317                         break;
8318                 udelay(10);
8319         }
8320         if (i >= 2000) {
8321                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8322                 return -ENODEV;
8323         }
8324
8325         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8326                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8327
8328         tg3_setup_rxbd_thresholds(tp);
8329
8330         /* Initialize the TG3_BDINFOs at:
8331          *  RCVDBDI_STD_BD:     standard eth size rx ring
8332          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8333          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8334          *
8335          * like so:
8336          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8337          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8338          *                              ring attribute flags
8339          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8340          *
8341          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8342          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8343          *
8344          * The size of each ring is fixed in the firmware, but the location is
8345          * configurable.
8346          */
8347         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8348              ((u64) tpr->rx_std_mapping >> 32));
8349         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8350              ((u64) tpr->rx_std_mapping & 0xffffffff));
8351         if (!tg3_flag(tp, 5717_PLUS))
8352                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8353                      NIC_SRAM_RX_BUFFER_DESC);
8354
8355         /* Disable the mini ring */
8356         if (!tg3_flag(tp, 5705_PLUS))
8357                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8358                      BDINFO_FLAGS_DISABLED);
8359
8360         /* Program the jumbo buffer descriptor ring control
8361          * blocks on those devices that have them.
8362          */
8363         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8364             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8365
8366                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8367                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8368                              ((u64) tpr->rx_jmb_mapping >> 32));
8369                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8370                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8371                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8372                               BDINFO_FLAGS_MAXLEN_SHIFT;
8373                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8374                              val | BDINFO_FLAGS_USE_EXT_RECV);
8375                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8376                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8377                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8378                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8379                 } else {
8380                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8381                              BDINFO_FLAGS_DISABLED);
8382                 }
8383
8384                 if (tg3_flag(tp, 57765_PLUS)) {
8385                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8386                                 val = TG3_RX_STD_MAX_SIZE_5700;
8387                         else
8388                                 val = TG3_RX_STD_MAX_SIZE_5717;
8389                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8390                         val |= (TG3_RX_STD_DMA_SZ << 2);
8391                 } else
8392                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8393         } else
8394                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8395
8396         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8397
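             /* Publish the initial producer indices so the chip knows how
              * many rx buffers the host has already posted to the rings.
              */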
8398         tpr->rx_std_prod_idx = tp->rx_pending;
8399         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8400
8401         tpr->rx_jmb_prod_idx =
8402                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8403         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8404
8405         tg3_rings_reset(tp);
8406
8407         /* Initialize MAC address and backoff seed. */
8408         __tg3_set_mac_addr(tp, 0);
8409
8410         /* MTU + ethernet header + FCS + optional VLAN tag */
8411         tw32(MAC_RX_MTU_SIZE,
8412              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8413
8414         /* The slot time is changed by tg3_setup_phy if we
8415          * run at gigabit with half duplex.
8416          */
8417         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8418               (6 << TX_LENGTHS_IPG_SHIFT) |
8419               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8420
8421         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8422                 val |= tr32(MAC_TX_LENGTHS) &
8423                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8424                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8425
8426         tw32(MAC_TX_LENGTHS, val);
8427
8428         /* Receive rules. */
8429         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8430         tw32(RCVLPC_CONFIG, 0x0181);
8431
8432         /* Calculate the RDMAC_MODE setting early; we need it to determine
8433          * the RCVLPC_STATE_ENABLE mask.
8434          */
8435         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8436                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8437                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8438                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8439                       RDMAC_MODE_LNGREAD_ENAB);
8440
8441         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8442                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8443
8444         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8445             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8446             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8447                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8448                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8449                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8450
8451         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8452             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8453                 if (tg3_flag(tp, TSO_CAPABLE) &&
8454                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8455                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8456                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8457                            !tg3_flag(tp, IS_5788)) {
8458                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8459                 }
8460         }
8461
8462         if (tg3_flag(tp, PCI_EXPRESS))
8463                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8464
8465         if (tg3_flag(tp, HW_TSO_1) ||
8466             tg3_flag(tp, HW_TSO_2) ||
8467             tg3_flag(tp, HW_TSO_3))
8468                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8469
8470         if (tg3_flag(tp, 57765_PLUS) ||
8471             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8472             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8473                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8474
8475         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8476                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8477
8478         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8479             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8480             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8481             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8482             tg3_flag(tp, 57765_PLUS)) {
8483                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8484                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8485                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8486                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8487                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8488                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8489                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8490                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8491                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8492                 }
8493                 tw32(TG3_RDMA_RSRVCTRL_REG,
8494                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8495         }
8496
8497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8498             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8499                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8500                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8501                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8502                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8503         }
8504
8505         /* Receive/send statistics. */
8506         if (tg3_flag(tp, 5750_PLUS)) {
8507                 val = tr32(RCVLPC_STATS_ENABLE);
8508                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8509                 tw32(RCVLPC_STATS_ENABLE, val);
8510         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8511                    tg3_flag(tp, TSO_CAPABLE)) {
8512                 val = tr32(RCVLPC_STATS_ENABLE);
8513                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8514                 tw32(RCVLPC_STATS_ENABLE, val);
8515         } else {
8516                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8517         }
8518         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8519         tw32(SNDDATAI_STATSENAB, 0xffffff);
8520         tw32(SNDDATAI_STATSCTRL,
8521              (SNDDATAI_SCTRL_ENABLE |
8522               SNDDATAI_SCTRL_FASTUPD));
8523
8524         /* Setup host coalescing engine. */
8525         tw32(HOSTCC_MODE, 0);
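             /* Wait up to 20 ms for the coalescing engine to quiesce
              * before reprogramming it.
              */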
8526         for (i = 0; i < 2000; i++) {
8527                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8528                         break;
8529                 udelay(10);
8530         }
8531
8532         __tg3_set_coalesce(tp, &tp->coal);
8533
8534         if (!tg3_flag(tp, 5705_PLUS)) {
8535                 /* Status/statistics block address.  See tg3_timer,
8536                  * the tg3_periodic_fetch_stats call there, and
8537                  * tg3_get_stats64 to see how this works for 5705/5750 chips.
8538                  */
8539                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8540                      ((u64) tp->stats_mapping >> 32));
8541                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8542                      ((u64) tp->stats_mapping & 0xffffffff));
8543                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8544
8545                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8546
8547                 /* Clear statistics and status block memory areas */
8548                 for (i = NIC_SRAM_STATS_BLK;
8549                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8550                      i += sizeof(u32)) {
8551                         tg3_write_mem(tp, i, 0);
8552                         udelay(40);
8553                 }
8554         }
8555
8556         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8557
8558         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8559         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8560         if (!tg3_flag(tp, 5705_PLUS))
8561                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8562
8563         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8564                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8565                 /* reset to prevent losing 1st rx packet intermittently */
8566                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8567                 udelay(10);
8568         }
8569
8570         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8571                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8572                         MAC_MODE_FHDE_ENABLE;
8573         if (tg3_flag(tp, ENABLE_APE))
8574                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8575         if (!tg3_flag(tp, 5705_PLUS) &&
8576             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8577             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8578                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8579         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8580         udelay(40);
8581
8582         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8583          * If TG3_FLAG_IS_NIC is zero, we should read the
8584          * register to preserve the GPIO settings for LOMs. The GPIOs,
8585          * whether used as inputs or outputs, are set by boot code after
8586          * reset.
8587          */
8588         if (!tg3_flag(tp, IS_NIC)) {
8589                 u32 gpio_mask;
8590
8591                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8592                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8593                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8594
8595                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8596                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8597                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8598
8599                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8600                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8601
8602                 tp->grc_local_ctrl &= ~gpio_mask;
8603                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8604
8605                 /* GPIO1 must be driven high for eeprom write protect */
8606                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8607                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8608                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8609         }
8610         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8611         udelay(100);
8612
8613         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8614                 val = tr32(MSGINT_MODE);
8615                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8616                 tw32(MSGINT_MODE, val);
8617         }
8618
8619         if (!tg3_flag(tp, 5705_PLUS)) {
8620                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8621                 udelay(40);
8622         }
8623
8624         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8625                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8626                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8627                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8628                WDMAC_MODE_LNGREAD_ENAB);
8629
8630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8631             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8632                 if (tg3_flag(tp, TSO_CAPABLE) &&
8633                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8634                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8635                         /* nothing */
8636                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8637                            !tg3_flag(tp, IS_5788)) {
8638                         val |= WDMAC_MODE_RX_ACCEL;
8639                 }
8640         }
8641
8642         /* Enable host coalescing bug fix */
8643         if (tg3_flag(tp, 5755_PLUS))
8644                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8645
8646         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8647                 val |= WDMAC_MODE_BURST_ALL_DATA;
8648
8649         tw32_f(WDMAC_MODE, val);
8650         udelay(40);
8651
8652         if (tg3_flag(tp, PCIX_MODE)) {
8653                 u16 pcix_cmd;
8654
8655                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8656                                      &pcix_cmd);
8657                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8658                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8659                         pcix_cmd |= PCI_X_CMD_READ_2K;
8660                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8661                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8662                         pcix_cmd |= PCI_X_CMD_READ_2K;
8663                 }
8664                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8665                                       pcix_cmd);
8666         }
8667
8668         tw32_f(RDMAC_MODE, rdmac_mode);
8669         udelay(40);
8670
8671         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8672         if (!tg3_flag(tp, 5705_PLUS))
8673                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8674
8675         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8676                 tw32(SNDDATAC_MODE,
8677                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8678         else
8679                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8680
8681         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8682         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8683         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8684         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8685                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8686         tw32(RCVDBDI_MODE, val);
8687         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8688         if (tg3_flag(tp, HW_TSO_1) ||
8689             tg3_flag(tp, HW_TSO_2) ||
8690             tg3_flag(tp, HW_TSO_3))
8691                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8692         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8693         if (tg3_flag(tp, ENABLE_TSS))
8694                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8695         tw32(SNDBDI_MODE, val);
8696         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8697
8698         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8699                 err = tg3_load_5701_a0_firmware_fix(tp);
8700                 if (err)
8701                         return err;
8702         }
8703
8704         if (tg3_flag(tp, TSO_CAPABLE)) {
8705                 err = tg3_load_tso_firmware(tp);
8706                 if (err)
8707                         return err;
8708         }
8709
8710         tp->tx_mode = TX_MODE_ENABLE;
8711
8712         if (tg3_flag(tp, 5755_PLUS) ||
8713             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8714                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8715
8716         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8717                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8718                 tp->tx_mode &= ~val;
8719                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8720         }
8721
8722         tw32_f(MAC_TX_MODE, tp->tx_mode);
8723         udelay(100);
8724
8725         if (tg3_flag(tp, ENABLE_RSS)) {
8726                 u32 reg = MAC_RSS_INDIR_TBL_0;
8727                 u8 *ent = (u8 *)&val;
8728
8729                 /* Setup the indirection table */
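                     /* Four one-byte entries are packed into each 32-bit
                      * register.  Vector zero handles link interrupts only,
                      * so flows are spread across the remaining
                      * irq_cnt - 1 rx vectors.
                      */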
8730                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8731                         int idx = i % sizeof(val);
8732
8733                         ent[idx] = i % (tp->irq_cnt - 1);
8734                         if (idx == sizeof(val) - 1) {
8735                                 tw32(reg, val);
8736                                 reg += 4;
8737                         }
8738                 }
8739
8740                 /* Setup the "secret" hash key. */
8741                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8742                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8743                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8744                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8745                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8746                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8747                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8748                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8749                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8750                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8751         }
8752
8753         tp->rx_mode = RX_MODE_ENABLE;
8754         if (tg3_flag(tp, 5755_PLUS))
8755                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8756
8757         if (tg3_flag(tp, ENABLE_RSS))
8758                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8759                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8760                                RX_MODE_RSS_IPV6_HASH_EN |
8761                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8762                                RX_MODE_RSS_IPV4_HASH_EN |
8763                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8764
8765         tw32_f(MAC_RX_MODE, tp->rx_mode);
8766         udelay(10);
8767
8768         tw32(MAC_LED_CTRL, tp->led_ctrl);
8769
8770         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8771         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8772                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8773                 udelay(10);
8774         }
8775         tw32_f(MAC_RX_MODE, tp->rx_mode);
8776         udelay(10);
8777
8778         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8779                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8780                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8781                         /* Set drive transmission level to 1.2V  */
8782                         /* only if the signal pre-emphasis bit is not set  */
8783                         val = tr32(MAC_SERDES_CFG);
8784                         val &= 0xfffff000;
8785                         val |= 0x880;
8786                         tw32(MAC_SERDES_CFG, val);
8787                 }
8788                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8789                         tw32(MAC_SERDES_CFG, 0x616000);
8790         }
8791
8792         /* Prevent chip from dropping frames when flow control
8793          * is enabled.
8794          */
8795         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8796                 val = 1;
8797         else
8798                 val = 2;
8799         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8800
8801         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8802             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8803                 /* Use hardware link auto-negotiation */
8804                 tg3_flag_set(tp, HW_AUTONEG);
8805         }
8806
8807         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8808             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8809                 u32 tmp;
8810
8811                 tmp = tr32(SERDES_RX_CTRL);
8812                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8813                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8814                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8815                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8816         }
8817
8818         if (!tg3_flag(tp, USE_PHYLIB)) {
8819                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8820                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8821                         tp->link_config.speed = tp->link_config.orig_speed;
8822                         tp->link_config.duplex = tp->link_config.orig_duplex;
8823                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8824                 }
8825
8826                 err = tg3_setup_phy(tp, 0);
8827                 if (err)
8828                         return err;
8829
8830                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8831                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8832                         u32 tmp;
8833
8834                         /* Clear CRC stats. */
8835                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8836                                 tg3_writephy(tp, MII_TG3_TEST1,
8837                                              tmp | MII_TG3_TEST1_CRC_EN);
8838                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8839                         }
8840                 }
8841         }
8842
8843         __tg3_set_rx_mode(tp->dev);
8844
8845         /* Initialize receive rules. */
8846         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8847         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8848         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8849         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8850
8851         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8852                 limit = 8;
8853         else
8854                 limit = 16;
8855         if (tg3_flag(tp, ENABLE_ASF))
8856                 limit -= 4;
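             /* Deliberate switch fall-through: clear the unused rule slots
              * from slot limit - 1 down to slot 4.  Slots 0 and 1 were
              * programmed above; slots 2 and 3 stay commented out.
              */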
8857         switch (limit) {
8858         case 16:
8859                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8860         case 15:
8861                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8862         case 14:
8863                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8864         case 13:
8865                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8866         case 12:
8867                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8868         case 11:
8869                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8870         case 10:
8871                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8872         case 9:
8873                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8874         case 8:
8875                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8876         case 7:
8877                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8878         case 6:
8879                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8880         case 5:
8881                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8882         case 4:
8883                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8884         case 3:
8885                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8886         case 2:
8887         case 1:
8888
8889         default:
8890                 break;
8891         }
8892
8893         if (tg3_flag(tp, ENABLE_APE))
8894                 /* Write our heartbeat update interval to APE. */
8895                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8896                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8897
8898         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8899
8900         return 0;
8901 }
8902
8903 /* Called at device open time to get the chip ready for
8904  * packet processing.  Invoked with tp->lock held.
8905  */
8906 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8907 {
8908         tg3_switch_clocks(tp);
8909
8910         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8911
8912         return tg3_reset_hw(tp, reset_phy);
8913 }
8914
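     /* Fold a 32-bit hardware counter into a 64-bit software counter.
      * The unsigned test (low < __val) detects carry out of the low word.
      */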
8915 #define TG3_STAT_ADD32(PSTAT, REG) \
8916 do {    u32 __val = tr32(REG); \
8917         (PSTAT)->low += __val; \
8918         if ((PSTAT)->low < __val) \
8919                 (PSTAT)->high += 1; \
8920 } while (0)
8921
8922 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8923 {
8924         struct tg3_hw_stats *sp = tp->hw_stats;
8925
8926         if (!netif_carrier_ok(tp->dev))
8927                 return;
8928
8929         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8930         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8931         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8932         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8933         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8934         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8935         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8936         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8937         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8938         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8939         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8940         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8941         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8942
8943         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8944         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8945         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8946         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8947         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8948         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8949         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8950         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8951         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8952         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8953         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8954         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8955         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8956         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8957
8958         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8959         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8960             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8961             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8962                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8963         } else {
8964                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8965                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8966                 if (val) {
8967                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8968                         sp->rx_discards.low += val;
8969                         if (sp->rx_discards.low < val)
8970                                 sp->rx_discards.high += 1;
8971                 }
8972                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8973         }
8974         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8975 }
8976
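     /* Workaround for chips that can miss an MSI under load: if a vector
      * still has work pending but its rx/tx consumer indices have not moved
      * since the last timer tick, rewrite the interrupt mailbox to replay
      * the lost interrupt.
      */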
8977 static void tg3_chk_missed_msi(struct tg3 *tp)
8978 {
8979         u32 i;
8980
8981         for (i = 0; i < tp->irq_cnt; i++) {
8982                 struct tg3_napi *tnapi = &tp->napi[i];
8983
8984                 if (tg3_has_work(tnapi)) {
8985                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8986                             tnapi->last_tx_cons == tnapi->tx_cons) {
8987                                 if (tnapi->chk_msi_cnt < 1) {
8988                                         tnapi->chk_msi_cnt++;
8989                                         return;
8990                                 }
8991                                 tw32_mailbox(tnapi->int_mbox,
8992                                              tnapi->last_tag << 24);
8993                         }
8994                 }
8995                 tnapi->chk_msi_cnt = 0;
8996                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8997                 tnapi->last_tx_cons = tnapi->tx_cons;
8998         }
8999 }
9000
9001 static void tg3_timer(unsigned long __opaque)
9002 {
9003         struct tg3 *tp = (struct tg3 *) __opaque;
9004
9005         if (tp->irq_sync)
9006                 goto restart_timer;
9007
9008         spin_lock(&tp->lock);
9009
9010         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9011             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9012                 tg3_chk_missed_msi(tp);
9013
9014         if (!tg3_flag(tp, TAGGED_STATUS)) {
9015                 /* All of this garbage is because, when using non-tagged
9016                  * IRQ status, the mailbox/status_block protocol the chip
9017                  * uses with the CPU is race-prone.
9018                  */
9019                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9020                         tw32(GRC_LOCAL_CTRL,
9021                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9022                 } else {
9023                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9024                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9025                 }
9026
9027                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9028                         tg3_flag_set(tp, RESTART_TIMER);
9029                         spin_unlock(&tp->lock);
9030                         schedule_work(&tp->reset_task);
9031                         return;
9032                 }
9033         }
9034
9035         /* This part only runs once per second. */
9036         if (!--tp->timer_counter) {
9037                 if (tg3_flag(tp, 5705_PLUS))
9038                         tg3_periodic_fetch_stats(tp);
9039
9040                 if (tp->setlpicnt && !--tp->setlpicnt)
9041                         tg3_phy_eee_enable(tp);
9042
9043                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9044                         u32 mac_stat;
9045                         int phy_event;
9046
9047                         mac_stat = tr32(MAC_STATUS);
9048
9049                         phy_event = 0;
9050                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9051                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9052                                         phy_event = 1;
9053                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9054                                 phy_event = 1;
9055
9056                         if (phy_event)
9057                                 tg3_setup_phy(tp, 0);
9058                 } else if (tg3_flag(tp, POLL_SERDES)) {
9059                         u32 mac_stat = tr32(MAC_STATUS);
9060                         int need_setup = 0;
9061
9062                         if (netif_carrier_ok(tp->dev) &&
9063                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9064                                 need_setup = 1;
9065                         }
9066                         if (!netif_carrier_ok(tp->dev) &&
9067                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9068                                          MAC_STATUS_SIGNAL_DET))) {
9069                                 need_setup = 1;
9070                         }
9071                         if (need_setup) {
9072                                 if (!tp->serdes_counter) {
9073                                         tw32_f(MAC_MODE,
9074                                              (tp->mac_mode &
9075                                               ~MAC_MODE_PORT_MODE_MASK));
9076                                         udelay(40);
9077                                         tw32_f(MAC_MODE, tp->mac_mode);
9078                                         udelay(40);
9079                                 }
9080                                 tg3_setup_phy(tp, 0);
9081                         }
9082                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9083                            tg3_flag(tp, 5780_CLASS)) {
9084                         tg3_serdes_parallel_detect(tp);
9085                 }
9086
9087                 tp->timer_counter = tp->timer_multiplier;
9088         }
9089
9090         /* Heartbeat is only sent once every 2 seconds.
9091          *
9092          * The heartbeat is to tell the ASF firmware that the host
9093          * driver is still alive.  In the event that the OS crashes,
9094          * ASF needs to reset the hardware to free up the FIFO space
9095          * that may be filled with rx packets destined for the host.
9096          * If the FIFO is full, ASF will no longer function properly.
9097          *
9098          * Unintended resets have been reported on real-time kernels
9099          * where the timer doesn't run on time.  Netpoll will also have
9100          * the same problem.
9101          *
9102          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9103          * to check the ring condition when the heartbeat is expiring
9104          * before doing the reset.  This will prevent most unintended
9105          * resets.
9106          */
9107         if (!--tp->asf_counter) {
9108                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9109                         tg3_wait_for_event_ack(tp);
9110
9111                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9112                                       FWCMD_NICDRV_ALIVE3);
9113                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9114                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9115                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9116
9117                         tg3_generate_fw_event(tp);
9118                 }
9119                 tp->asf_counter = tp->asf_multiplier;
9120         }
9121
9122         spin_unlock(&tp->lock);
9123
9124 restart_timer:
9125         tp->timer.expires = jiffies + tp->timer_offset;
9126         add_timer(&tp->timer);
9127 }
9128
9129 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9130 {
9131         irq_handler_t fn;
9132         unsigned long flags;
9133         char *name;
9134         struct tg3_napi *tnapi = &tp->napi[irq_num];
9135
9136         if (tp->irq_cnt == 1)
9137                 name = tp->dev->name;
9138         else {
9139                 name = &tnapi->irq_lbl[0];
9140                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9141                 name[IFNAMSIZ-1] = 0;
9142         }
9143
9144         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9145                 fn = tg3_msi;
9146                 if (tg3_flag(tp, 1SHOT_MSI))
9147                         fn = tg3_msi_1shot;
9148                 flags = 0;
9149         } else {
9150                 fn = tg3_interrupt;
9151                 if (tg3_flag(tp, TAGGED_STATUS))
9152                         fn = tg3_interrupt_tagged;
9153                 flags = IRQF_SHARED;
9154         }
9155
9156         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9157 }
9158
9159 static int tg3_test_interrupt(struct tg3 *tp)
9160 {
9161         struct tg3_napi *tnapi = &tp->napi[0];
9162         struct net_device *dev = tp->dev;
9163         int err, i, intr_ok = 0;
9164         u32 val;
9165
9166         if (!netif_running(dev))
9167                 return -ENODEV;
9168
9169         tg3_disable_ints(tp);
9170
9171         free_irq(tnapi->irq_vec, tnapi);
9172
9173         /*
9174          * Turn off MSI one-shot mode.  Otherwise this test has no
9175          * way to observe whether the interrupt was delivered.
9176          */
9177         if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9178                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9179                 tw32(MSGINT_MODE, val);
9180         }
9181
9182         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9183                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9184         if (err)
9185                 return err;
9186
9187         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9188         tg3_enable_ints(tp);
9189
9190         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9191                tnapi->coal_now);
9192
9193         for (i = 0; i < 5; i++) {
9194                 u32 int_mbox, misc_host_ctrl;
9195
9196                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9197                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9198
9199                 if ((int_mbox != 0) ||
9200                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9201                         intr_ok = 1;
9202                         break;
9203                 }
9204
9205                 msleep(10);
9206         }
9207
9208         tg3_disable_ints(tp);
9209
9210         free_irq(tnapi->irq_vec, tnapi);
9211
9212         err = tg3_request_irq(tp, 0);
9213
9214         if (err)
9215                 return err;
9216
9217         if (intr_ok) {
9218                 /* Re-enable MSI one-shot mode. */
9219                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9220                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9221                         tw32(MSGINT_MODE, val);
9222                 }
9223                 return 0;
9224         }
9225
9226         return -EIO;
9227 }
9228
9229 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
9230  * mode is successfully restored.
9231  */
9232 static int tg3_test_msi(struct tg3 *tp)
9233 {
9234         int err;
9235         u16 pci_cmd;
9236
9237         if (!tg3_flag(tp, USING_MSI))
9238                 return 0;
9239
9240         /* Turn off SERR reporting in case MSI terminates with Master
9241          * Abort.
9242          */
9243         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9244         pci_write_config_word(tp->pdev, PCI_COMMAND,
9245                               pci_cmd & ~PCI_COMMAND_SERR);
9246
9247         err = tg3_test_interrupt(tp);
9248
9249         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9250
9251         if (!err)
9252                 return 0;
9253
9254         /* other failures */
9255         if (err != -EIO)
9256                 return err;
9257
9258         /* MSI test failed, go back to INTx mode */
9259         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9260                     "to INTx mode. Please report this failure to the PCI "
9261                     "maintainer and include system chipset information\n");
9262
9263         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9264
9265         pci_disable_msi(tp->pdev);
9266
9267         tg3_flag_clear(tp, USING_MSI);
9268         tp->napi[0].irq_vec = tp->pdev->irq;
9269
9270         err = tg3_request_irq(tp, 0);
9271         if (err)
9272                 return err;
9273
9274         /* Need to reset the chip because the MSI cycle may have terminated
9275          * with Master Abort.
9276          */
9277         tg3_full_lock(tp, 1);
9278
9279         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9280         err = tg3_init_hw(tp, 1);
9281
9282         tg3_full_unlock(tp);
9283
9284         if (err)
9285                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9286
9287         return err;
9288 }
9289
9290 static int tg3_request_firmware(struct tg3 *tp)
9291 {
9292         const __be32 *fw_data;
9293
9294         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9295                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9296                            tp->fw_needed);
9297                 return -ENOENT;
9298         }
9299
9300         fw_data = (void *)tp->fw->data;
9301
9302         /* Firmware blob starts with version numbers, followed by
9303          * start address and _full_ length including BSS sections
9304          * (which must be longer than the actual data, of course).
9305          */
9306
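             /* Header layout, three big-endian u32s (12 bytes):
              *   fw_data[0] version, fw_data[1] NIC load address,
              *   fw_data[2] image length including BSS.
              */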
9307         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9308         if (tp->fw_len < (tp->fw->size - 12)) {
9309                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9310                            tp->fw_len, tp->fw_needed);
9311                 release_firmware(tp->fw);
9312                 tp->fw = NULL;
9313                 return -EINVAL;
9314         }
9315
9316         /* We no longer need firmware; we have it. */
9317         tp->fw_needed = NULL;
9318         return 0;
9319 }
9320
9321 static bool tg3_enable_msix(struct tg3 *tp)
9322 {
9323         int i, rc, cpus = num_online_cpus();
9324         struct msix_entry msix_ent[tp->irq_max];
9325
9326         if (cpus == 1)
9327                 /* Just fall back to the simpler MSI mode. */
9328                 return false;
9329
9330         /*
9331          * We want as many rx rings enabled as there are CPUs.
9332          * The first MSI-X vector only deals with link interrupts, etc.,
9333          * so we add one to the number of vectors we are requesting.
9334          */
9335         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9336
9337         for (i = 0; i < tp->irq_max; i++) {
9338                 msix_ent[i].entry  = i;
9339                 msix_ent[i].vector = 0;
9340         }
9341
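             /* pci_enable_msix() returns 0 on success, a negative errno on
              * failure, or a positive count of vectors actually available
              * when fewer than requested; retry with the smaller count.
              */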
9342         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9343         if (rc < 0) {
9344                 return false;
9345         } else if (rc != 0) {
9346                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9347                         return false;
9348                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9349                               tp->irq_cnt, rc);
9350                 tp->irq_cnt = rc;
9351         }
9352
9353         for (i = 0; i < tp->irq_max; i++)
9354                 tp->napi[i].irq_vec = msix_ent[i].vector;
9355
9356         netif_set_real_num_tx_queues(tp->dev, 1);
9357         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9358         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9359                 pci_disable_msix(tp->pdev);
9360                 return false;
9361         }
9362
9363         if (tp->irq_cnt > 1) {
9364                 tg3_flag_set(tp, ENABLE_RSS);
9365
9366                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9367                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9368                         tg3_flag_set(tp, ENABLE_TSS);
9369                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9370                 }
9371         }
9372
9373         return true;
9374 }
9375
9376 static void tg3_ints_init(struct tg3 *tp)
9377 {
9378         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9379             !tg3_flag(tp, TAGGED_STATUS)) {
9380                 /* All MSI-supporting chips should support tagged
9381                  * status.  Assert that this is the case.
9382                  */
9383                 netdev_warn(tp->dev,
9384                             "MSI without TAGGED_STATUS? Not using MSI\n");
9385                 goto defcfg;
9386         }
9387
9388         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9389                 tg3_flag_set(tp, USING_MSIX);
9390         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9391                 tg3_flag_set(tp, USING_MSI);
9392
9393         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9394                 u32 msi_mode = tr32(MSGINT_MODE);
9395                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9396                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9397                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9398         }
9399 defcfg:
9400         if (!tg3_flag(tp, USING_MSIX)) {
9401                 tp->irq_cnt = 1;
9402                 tp->napi[0].irq_vec = tp->pdev->irq;
9403                 netif_set_real_num_tx_queues(tp->dev, 1);
9404                 netif_set_real_num_rx_queues(tp->dev, 1);
9405         }
9406 }
9407
9408 static void tg3_ints_fini(struct tg3 *tp)
9409 {
9410         if (tg3_flag(tp, USING_MSIX))
9411                 pci_disable_msix(tp->pdev);
9412         else if (tg3_flag(tp, USING_MSI))
9413                 pci_disable_msi(tp->pdev);
9414         tg3_flag_clear(tp, USING_MSI);
9415         tg3_flag_clear(tp, USING_MSIX);
9416         tg3_flag_clear(tp, ENABLE_RSS);
9417         tg3_flag_clear(tp, ENABLE_TSS);
9418 }
9419
9420 static int tg3_open(struct net_device *dev)
9421 {
9422         struct tg3 *tp = netdev_priv(dev);
9423         int i, err;
9424
9425         if (tp->fw_needed) {
9426                 err = tg3_request_firmware(tp);
9427                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9428                         if (err)
9429                                 return err;
9430                 } else if (err) {
9431                         netdev_warn(tp->dev, "TSO capability disabled\n");
9432                         tg3_flag_clear(tp, TSO_CAPABLE);
9433                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9434                         netdev_notice(tp->dev, "TSO capability restored\n");
9435                         tg3_flag_set(tp, TSO_CAPABLE);
9436                 }
9437         }
9438
9439         netif_carrier_off(tp->dev);
9440
9441         err = tg3_power_up(tp);
9442         if (err)
9443                 return err;
9444
9445         tg3_full_lock(tp, 0);
9446
9447         tg3_disable_ints(tp);
9448         tg3_flag_clear(tp, INIT_COMPLETE);
9449
9450         tg3_full_unlock(tp);
9451
9452         /*
9453          * Set up interrupts first so we know how
9454          * many NAPI resources to allocate.
9455          */
9456         tg3_ints_init(tp);
9457
9458         /* The placement of this call is tied
9459          * to the setup and use of Host TX descriptors.
9460          */
9461         err = tg3_alloc_consistent(tp);
9462         if (err)
9463                 goto err_out1;
9464
9465         tg3_napi_init(tp);
9466
9467         tg3_napi_enable(tp);
9468
9469         for (i = 0; i < tp->irq_cnt; i++) {
9470                 struct tg3_napi *tnapi = &tp->napi[i];
9471                 err = tg3_request_irq(tp, i);
9472                 if (err) {
9473                         for (i--; i >= 0; i--)
9474                                 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9475                         break;
9476                 }
9477         }
9478
9479         if (err)
9480                 goto err_out2;
9481
9482         tg3_full_lock(tp, 0);
9483
9484         err = tg3_init_hw(tp, 1);
9485         if (err) {
9486                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9487                 tg3_free_rings(tp);
9488         } else {
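                     /* Tagged-status chips do not need the interrupt
                      * race-avoidance poll in tg3_timer, so a 1 Hz timer
                      * suffices; other chips (and 5717/57765, which need the
                      * missed-MSI check) tick at 10 Hz.
                      */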
9489                 if (tg3_flag(tp, TAGGED_STATUS) &&
9490                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9491                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9492                         tp->timer_offset = HZ;
9493                 else
9494                         tp->timer_offset = HZ / 10;
9495
9496                 BUG_ON(tp->timer_offset > HZ);
9497                 tp->timer_counter = tp->timer_multiplier =
9498                         (HZ / tp->timer_offset);
9499                 tp->asf_counter = tp->asf_multiplier =
9500                         ((HZ / tp->timer_offset) * 2);
9501
9502                 init_timer(&tp->timer);
9503                 tp->timer.expires = jiffies + tp->timer_offset;
9504                 tp->timer.data = (unsigned long) tp;
9505                 tp->timer.function = tg3_timer;
9506         }
9507
9508         tg3_full_unlock(tp);
9509
9510         if (err)
9511                 goto err_out3;
9512
9513         if (tg3_flag(tp, USING_MSI)) {
9514                 err = tg3_test_msi(tp);
9515
9516                 if (err) {
9517                         tg3_full_lock(tp, 0);
9518                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9519                         tg3_free_rings(tp);
9520                         tg3_full_unlock(tp);
9521
9522                         goto err_out2;
9523                 }
9524
9525                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9526                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9527
9528                         tw32(PCIE_TRANSACTION_CFG,
9529                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9530                 }
9531         }
9532
9533         tg3_phy_start(tp);
9534
9535         tg3_full_lock(tp, 0);
9536
9537         add_timer(&tp->timer);
9538         tg3_flag_set(tp, INIT_COMPLETE);
9539         tg3_enable_ints(tp);
9540
9541         tg3_full_unlock(tp);
9542
9543         netif_tx_start_all_queues(dev);
9544
9545         /*
9546          * Reset the loopback feature if it was turned on while the device
9547          * was down; make sure that it is reinstated properly now.
9548          */
9549         if (dev->features & NETIF_F_LOOPBACK)
9550                 tg3_set_loopback(dev, dev->features);
9551
9552         return 0;
9553
9554 err_out3:
9555         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9556                 struct tg3_napi *tnapi = &tp->napi[i];
9557                 free_irq(tnapi->irq_vec, tnapi);
9558         }
9559
9560 err_out2:
9561         tg3_napi_disable(tp);
9562         tg3_napi_fini(tp);
9563         tg3_free_consistent(tp);
9564
9565 err_out1:
9566         tg3_ints_fini(tp);
9567         tg3_frob_aux_power(tp, false);
9568         pci_set_power_state(tp->pdev, PCI_D3hot);
9569         return err;
9570 }
9571
9572 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9573                                                  struct rtnl_link_stats64 *);
9574 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9575
9576 static int tg3_close(struct net_device *dev)
9577 {
9578         int i;
9579         struct tg3 *tp = netdev_priv(dev);
9580
9581         tg3_napi_disable(tp);
9582         cancel_work_sync(&tp->reset_task);
9583
9584         netif_tx_stop_all_queues(dev);
9585
9586         del_timer_sync(&tp->timer);
9587
9588         tg3_phy_stop(tp);
9589
9590         tg3_full_lock(tp, 1);
9591
9592         tg3_disable_ints(tp);
9593
9594         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9595         tg3_free_rings(tp);
9596         tg3_flag_clear(tp, INIT_COMPLETE);
9597
9598         tg3_full_unlock(tp);
9599
9600         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9601                 struct tg3_napi *tnapi = &tp->napi[i];
9602                 free_irq(tnapi->irq_vec, tnapi);
9603         }
9604
9605         tg3_ints_fini(tp);
9606
9607         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9608
9609         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9610                sizeof(tp->estats_prev));
9611
9612         tg3_napi_fini(tp);
9613
9614         tg3_free_consistent(tp);
9615
9616         tg3_power_down(tp);
9617
9618         netif_carrier_off(tp->dev);
9619
9620         return 0;
9621 }
9622
9623 static inline u64 get_stat64(tg3_stat64_t *val)
9624 {
9625         return ((u64)val->high << 32) | ((u64)val->low);
9626 }
9627
9628 static u64 calc_crc_errors(struct tg3 *tp)
9629 {
9630         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9631
9632         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9633             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9634              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9635                 u32 val;
9636
9637                 spin_lock_bh(&tp->lock);
9638                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9639                         tg3_writephy(tp, MII_TG3_TEST1,
9640                                      val | MII_TG3_TEST1_CRC_EN);
9641                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9642                 } else
9643                         val = 0;
9644                 spin_unlock_bh(&tp->lock);
9645
9646                 tp->phy_crc_errors += val;
9647
9648                 return tp->phy_crc_errors;
9649         }
9650
9651         return get_stat64(&hw_stats->rx_fcs_errors);
9652 }
9653
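     /* Each ethtool counter is the snapshot saved at the last close
      * (estats_prev) plus the live 64-bit value from the hardware stats
      * block, so totals persist across close/open cycles.
      */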
9654 #define ESTAT_ADD(member) \
9655         estats->member =        old_estats->member + \
9656                                 get_stat64(&hw_stats->member)
9657
9658 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9659 {
9660         struct tg3_ethtool_stats *estats = &tp->estats;
9661         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9662         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9663
9664         if (!hw_stats)
9665                 return old_estats;
9666
9667         ESTAT_ADD(rx_octets);
9668         ESTAT_ADD(rx_fragments);
9669         ESTAT_ADD(rx_ucast_packets);
9670         ESTAT_ADD(rx_mcast_packets);
9671         ESTAT_ADD(rx_bcast_packets);
9672         ESTAT_ADD(rx_fcs_errors);
9673         ESTAT_ADD(rx_align_errors);
9674         ESTAT_ADD(rx_xon_pause_rcvd);
9675         ESTAT_ADD(rx_xoff_pause_rcvd);
9676         ESTAT_ADD(rx_mac_ctrl_rcvd);
9677         ESTAT_ADD(rx_xoff_entered);
9678         ESTAT_ADD(rx_frame_too_long_errors);
9679         ESTAT_ADD(rx_jabbers);
9680         ESTAT_ADD(rx_undersize_packets);
9681         ESTAT_ADD(rx_in_length_errors);
9682         ESTAT_ADD(rx_out_length_errors);
9683         ESTAT_ADD(rx_64_or_less_octet_packets);
9684         ESTAT_ADD(rx_65_to_127_octet_packets);
9685         ESTAT_ADD(rx_128_to_255_octet_packets);
9686         ESTAT_ADD(rx_256_to_511_octet_packets);
9687         ESTAT_ADD(rx_512_to_1023_octet_packets);
9688         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9689         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9690         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9691         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9692         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9693
9694         ESTAT_ADD(tx_octets);
9695         ESTAT_ADD(tx_collisions);
9696         ESTAT_ADD(tx_xon_sent);
9697         ESTAT_ADD(tx_xoff_sent);
9698         ESTAT_ADD(tx_flow_control);
9699         ESTAT_ADD(tx_mac_errors);
9700         ESTAT_ADD(tx_single_collisions);
9701         ESTAT_ADD(tx_mult_collisions);
9702         ESTAT_ADD(tx_deferred);
9703         ESTAT_ADD(tx_excessive_collisions);
9704         ESTAT_ADD(tx_late_collisions);
9705         ESTAT_ADD(tx_collide_2times);
9706         ESTAT_ADD(tx_collide_3times);
9707         ESTAT_ADD(tx_collide_4times);
9708         ESTAT_ADD(tx_collide_5times);
9709         ESTAT_ADD(tx_collide_6times);
9710         ESTAT_ADD(tx_collide_7times);
9711         ESTAT_ADD(tx_collide_8times);
9712         ESTAT_ADD(tx_collide_9times);
9713         ESTAT_ADD(tx_collide_10times);
9714         ESTAT_ADD(tx_collide_11times);
9715         ESTAT_ADD(tx_collide_12times);
9716         ESTAT_ADD(tx_collide_13times);
9717         ESTAT_ADD(tx_collide_14times);
9718         ESTAT_ADD(tx_collide_15times);
9719         ESTAT_ADD(tx_ucast_packets);
9720         ESTAT_ADD(tx_mcast_packets);
9721         ESTAT_ADD(tx_bcast_packets);
9722         ESTAT_ADD(tx_carrier_sense_errors);
9723         ESTAT_ADD(tx_discards);
9724         ESTAT_ADD(tx_errors);
9725
9726         ESTAT_ADD(dma_writeq_full);
9727         ESTAT_ADD(dma_write_prioq_full);
9728         ESTAT_ADD(rxbds_empty);
9729         ESTAT_ADD(rx_discards);
9730         ESTAT_ADD(rx_errors);
9731         ESTAT_ADD(rx_threshold_hit);
9732
9733         ESTAT_ADD(dma_readq_full);
9734         ESTAT_ADD(dma_read_prioq_full);
9735         ESTAT_ADD(tx_comp_queue_full);
9736
9737         ESTAT_ADD(ring_set_send_prod_index);
9738         ESTAT_ADD(ring_status_update);
9739         ESTAT_ADD(nic_irqs);
9740         ESTAT_ADD(nic_avoided_irqs);
9741         ESTAT_ADD(nic_tx_threshold_hit);
9742
9743         ESTAT_ADD(mbuf_lwm_thresh_hit);
9744
9745         return estats;
9746 }
9747
9748 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9749                                                  struct rtnl_link_stats64 *stats)
9750 {
9751         struct tg3 *tp = netdev_priv(dev);
9752         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9753         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9754
9755         if (!hw_stats)
9756                 return old_stats;
9757
9758         stats->rx_packets = old_stats->rx_packets +
9759                 get_stat64(&hw_stats->rx_ucast_packets) +
9760                 get_stat64(&hw_stats->rx_mcast_packets) +
9761                 get_stat64(&hw_stats->rx_bcast_packets);
9762
9763         stats->tx_packets = old_stats->tx_packets +
9764                 get_stat64(&hw_stats->tx_ucast_packets) +
9765                 get_stat64(&hw_stats->tx_mcast_packets) +
9766                 get_stat64(&hw_stats->tx_bcast_packets);
9767
9768         stats->rx_bytes = old_stats->rx_bytes +
9769                 get_stat64(&hw_stats->rx_octets);
9770         stats->tx_bytes = old_stats->tx_bytes +
9771                 get_stat64(&hw_stats->tx_octets);
9772
9773         stats->rx_errors = old_stats->rx_errors +
9774                 get_stat64(&hw_stats->rx_errors);
9775         stats->tx_errors = old_stats->tx_errors +
9776                 get_stat64(&hw_stats->tx_errors) +
9777                 get_stat64(&hw_stats->tx_mac_errors) +
9778                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9779                 get_stat64(&hw_stats->tx_discards);
9780
9781         stats->multicast = old_stats->multicast +
9782                 get_stat64(&hw_stats->rx_mcast_packets);
9783         stats->collisions = old_stats->collisions +
9784                 get_stat64(&hw_stats->tx_collisions);
9785
9786         stats->rx_length_errors = old_stats->rx_length_errors +
9787                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9788                 get_stat64(&hw_stats->rx_undersize_packets);
9789
9790         stats->rx_over_errors = old_stats->rx_over_errors +
9791                 get_stat64(&hw_stats->rxbds_empty);
9792         stats->rx_frame_errors = old_stats->rx_frame_errors +
9793                 get_stat64(&hw_stats->rx_align_errors);
9794         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9795                 get_stat64(&hw_stats->tx_discards);
9796         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9797                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9798
9799         stats->rx_crc_errors = old_stats->rx_crc_errors +
9800                 calc_crc_errors(tp);
9801
9802         stats->rx_missed_errors = old_stats->rx_missed_errors +
9803                 get_stat64(&hw_stats->rx_discards);
9804
9805         stats->rx_dropped = tp->rx_dropped;
9806
9807         return stats;
9808 }
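/* tg3_get_stats64() above maps the MAC statistics block onto the
 * generic rtnl_link_stats64 layout, again folding in the pre-reset
 * snapshot (tp->net_stats_prev) so totals survive chip resets.
 */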
9809
9810 static inline u32 calc_crc(unsigned char *buf, int len)
9811 {
9812         u32 reg;
9813         u32 tmp;
9814         int j, k;
9815
9816         reg = 0xffffffff;
9817
9818         for (j = 0; j < len; j++) {
9819                 reg ^= buf[j];
9820
9821                 for (k = 0; k < 8; k++) {
9822                         tmp = reg & 0x01;
9823
9824                         reg >>= 1;
9825
9826                         if (tmp)
9827                                 reg ^= 0xedb88320;
9828                 }
9829         }
9830
9831         return ~reg;
9832 }
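/* calc_crc() above is the bit-reflected IEEE 802.3 CRC-32 (polynomial
 * 0xedb88320) with the customary final inversion.  The per-byte update
 * step is equivalent to this sketch (illustrative only):
 *
 *	reg ^= byte;
 *	for (k = 0; k < 8; k++)
 *		reg = (reg >> 1) ^ ((reg & 1) ? 0xedb88320 : 0);
 *
 * __tg3_set_rx_mode() below undoes the final inversion with ~crc before
 * extracting the 7 multicast hash bits.
 */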
9833
9834 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9835 {
9836         /* accept or reject all multicast frames */
9837         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9838         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9839         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9840         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9841 }
9842
9843 static void __tg3_set_rx_mode(struct net_device *dev)
9844 {
9845         struct tg3 *tp = netdev_priv(dev);
9846         u32 rx_mode;
9847
9848         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9849                                   RX_MODE_KEEP_VLAN_TAG);
9850
9851 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9852         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9853          * flag clear.
9854          */
9855         if (!tg3_flag(tp, ENABLE_ASF))
9856                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9857 #endif
9858
9859         if (dev->flags & IFF_PROMISC) {
9860                 /* Promiscuous mode. */
9861                 rx_mode |= RX_MODE_PROMISC;
9862         } else if (dev->flags & IFF_ALLMULTI) {
9863                 /* Accept all multicast. */
9864                 tg3_set_multi(tp, 1);
9865         } else if (netdev_mc_empty(dev)) {
9866                 /* Reject all multicast. */
9867                 tg3_set_multi(tp, 0);
9868         } else {
9869                 /* Accept one or more multicast(s). */
9870                 struct netdev_hw_addr *ha;
9871                 u32 mc_filter[4] = { 0, };
9872                 u32 regidx;
9873                 u32 bit;
9874                 u32 crc;
9875
9876                 netdev_for_each_mc_addr(ha, dev) {
9877                         crc = calc_crc(ha->addr, ETH_ALEN);
9878                         bit = ~crc & 0x7f;
9879                         regidx = (bit & 0x60) >> 5;
9880                         bit &= 0x1f;
9881                         mc_filter[regidx] |= (1 << bit);
9882                 }
9883
9884                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9885                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9886                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9887                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9888         }
9889
9890         if (rx_mode != tp->rx_mode) {
9891                 tp->rx_mode = rx_mode;
9892                 tw32_f(MAC_RX_MODE, rx_mode);
9893                 udelay(10);
9894         }
9895 }
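/* Worked example for the hash mapping in __tg3_set_rx_mode() above
 * (illustrative value): if ~crc & 0x7f == 0x4a, then
 * regidx = (0x4a & 0x60) >> 5 = 2 and bit = 0x4a & 0x1f = 10, so
 * MAC_HASH_REG_2 gets bit 10 set.  The four 32-bit hash registers
 * together form the 128-bit multicast filter.
 */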
9896
9897 static void tg3_set_rx_mode(struct net_device *dev)
9898 {
9899         struct tg3 *tp = netdev_priv(dev);
9900
9901         if (!netif_running(dev))
9902                 return;
9903
9904         tg3_full_lock(tp, 0);
9905         __tg3_set_rx_mode(dev);
9906         tg3_full_unlock(tp);
9907 }
9908
9909 static int tg3_get_regs_len(struct net_device *dev)
9910 {
9911         return TG3_REG_BLK_SIZE;
9912 }
9913
9914 static void tg3_get_regs(struct net_device *dev,
9915                 struct ethtool_regs *regs, void *_p)
9916 {
9917         struct tg3 *tp = netdev_priv(dev);
9918
9919         regs->version = 0;
9920
9921         memset(_p, 0, TG3_REG_BLK_SIZE);
9922
9923         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9924                 return;
9925
9926         tg3_full_lock(tp, 0);
9927
9928         tg3_dump_legacy_regs(tp, (u32 *)_p);
9929
9930         tg3_full_unlock(tp);
9931 }
9932
9933 static int tg3_get_eeprom_len(struct net_device *dev)
9934 {
9935         struct tg3 *tp = netdev_priv(dev);
9936
9937         return tp->nvram_size;
9938 }
9939
9940 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9941 {
9942         struct tg3 *tp = netdev_priv(dev);
9943         int ret;
9944         u8  *pd;
9945         u32 i, offset, len, b_offset, b_count;
9946         __be32 val;
9947
9948         if (tg3_flag(tp, NO_NVRAM))
9949                 return -EINVAL;
9950
9951         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9952                 return -EAGAIN;
9953
9954         offset = eeprom->offset;
9955         len = eeprom->len;
9956         eeprom->len = 0;
9957
9958         eeprom->magic = TG3_EEPROM_MAGIC;
9959
9960         if (offset & 3) {
9961                 /* adjustments to start on required 4 byte boundary */
9962                 b_offset = offset & 3;
9963                 b_count = 4 - b_offset;
9964                 if (b_count > len) {
9965                         /* i.e. offset=1 len=2 */
9966                         b_count = len;
9967                 }
9968                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9969                 if (ret)
9970                         return ret;
9971                 memcpy(data, ((char *)&val) + b_offset, b_count);
9972                 len -= b_count;
9973                 offset += b_count;
9974                 eeprom->len += b_count;
9975         }
9976
9977         /* read bytes up to the last 4 byte boundary */
9978         pd = &data[eeprom->len];
9979         for (i = 0; i < (len - (len & 3)); i += 4) {
9980                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9981                 if (ret) {
9982                         eeprom->len += i;
9983                         return ret;
9984                 }
9985                 memcpy(pd + i, &val, 4);
9986         }
9987         eeprom->len += i;
9988
9989         if (len & 3) {
9990                 /* read last bytes not ending on 4 byte boundary */
9991                 pd = &data[eeprom->len];
9992                 b_count = len & 3;
9993                 b_offset = offset + len - b_count;
9994                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9995                 if (ret)
9996                         return ret;
9997                 memcpy(pd, &val, b_count);
9998                 eeprom->len += b_count;
9999         }
10000         return 0;
10001 }
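/* Worked example for the alignment handling in tg3_get_eeprom() above
 * (illustrative values): offset=5 len=10 copies bytes 1-3 of the word
 * at offset 4, then the whole word at offset 8, then bytes 0-2 of the
 * word at offset 12 - exactly 10 bytes, with every NVRAM access kept
 * 4-byte aligned.
 */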
10002
10003 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10004
10005 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10006 {
10007         struct tg3 *tp = netdev_priv(dev);
10008         int ret;
10009         u32 offset, len, b_offset, odd_len;
10010         u8 *buf;
10011         __be32 start, end;
10012
10013         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10014                 return -EAGAIN;
10015
10016         if (tg3_flag(tp, NO_NVRAM) ||
10017             eeprom->magic != TG3_EEPROM_MAGIC)
10018                 return -EINVAL;
10019
10020         offset = eeprom->offset;
10021         len = eeprom->len;
10022
10023         if ((b_offset = (offset & 3))) {
10024                 /* adjustments to start on required 4 byte boundary */
10025                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10026                 if (ret)
10027                         return ret;
10028                 len += b_offset;
10029                 offset &= ~3;
10030                 if (len < 4)
10031                         len = 4;
10032         }
10033
10034         odd_len = 0;
10035         if (len & 3) {
10036                 /* adjustments to end on required 4 byte boundary */
10037                 odd_len = 1;
10038                 len = (len + 3) & ~3;
10039                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10040                 if (ret)
10041                         return ret;
10042         }
10043
10044         buf = data;
10045         if (b_offset || odd_len) {
10046                 buf = kmalloc(len, GFP_KERNEL);
10047                 if (!buf)
10048                         return -ENOMEM;
10049                 if (b_offset)
10050                         memcpy(buf, &start, 4);
10051                 if (odd_len)
10052                         memcpy(buf+len-4, &end, 4);
10053                 memcpy(buf + b_offset, data, eeprom->len);
10054         }
10055
10056         ret = tg3_nvram_write_block(tp, offset, len, buf);
10057
10058         if (buf != data)
10059                 kfree(buf);
10060
10061         return ret;
10062 }
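/* Worked example for the read-modify-write in tg3_set_eeprom() above
 * (illustrative values): for offset=6 len=3 the write is widened to the
 * aligned span 4..11; the words at offsets 4 and 8 are read first, the
 * three caller bytes are merged in at offset 2 of the bounce buffer,
 * and the whole 8-byte block is rewritten, preserving the neighbouring
 * bytes.
 */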
10063
10064 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10065 {
10066         struct tg3 *tp = netdev_priv(dev);
10067
10068         if (tg3_flag(tp, USE_PHYLIB)) {
10069                 struct phy_device *phydev;
10070                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10071                         return -EAGAIN;
10072                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10073                 return phy_ethtool_gset(phydev, cmd);
10074         }
10075
10076         cmd->supported = (SUPPORTED_Autoneg);
10077
10078         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10079                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10080                                    SUPPORTED_1000baseT_Full);
10081
10082         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10083                 cmd->supported |= (SUPPORTED_100baseT_Half |
10084                                    SUPPORTED_100baseT_Full |
10085                                    SUPPORTED_10baseT_Half |
10086                                    SUPPORTED_10baseT_Full |
10087                                    SUPPORTED_TP);
10088                 cmd->port = PORT_TP;
10089         } else {
10090                 cmd->supported |= SUPPORTED_FIBRE;
10091                 cmd->port = PORT_FIBRE;
10092         }
10093
10094         cmd->advertising = tp->link_config.advertising;
10095         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10096                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10097                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10098                                 cmd->advertising |= ADVERTISED_Pause;
10099                         } else {
10100                                 cmd->advertising |= ADVERTISED_Pause |
10101                                                     ADVERTISED_Asym_Pause;
10102                         }
10103                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10104                         cmd->advertising |= ADVERTISED_Asym_Pause;
10105                 }
10106         }
10107         if (netif_running(dev)) {
10108                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10109                 cmd->duplex = tp->link_config.active_duplex;
10110         } else {
10111                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10112                 cmd->duplex = DUPLEX_INVALID;
10113         }
10114         cmd->phy_address = tp->phy_addr;
10115         cmd->transceiver = XCVR_INTERNAL;
10116         cmd->autoneg = tp->link_config.autoneg;
10117         cmd->maxtxpkt = 0;
10118         cmd->maxrxpkt = 0;
10119         return 0;
10120 }
10121
10122 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10123 {
10124         struct tg3 *tp = netdev_priv(dev);
10125         u32 speed = ethtool_cmd_speed(cmd);
10126
10127         if (tg3_flag(tp, USE_PHYLIB)) {
10128                 struct phy_device *phydev;
10129                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10130                         return -EAGAIN;
10131                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10132                 return phy_ethtool_sset(phydev, cmd);
10133         }
10134
10135         if (cmd->autoneg != AUTONEG_ENABLE &&
10136             cmd->autoneg != AUTONEG_DISABLE)
10137                 return -EINVAL;
10138
10139         if (cmd->autoneg == AUTONEG_DISABLE &&
10140             cmd->duplex != DUPLEX_FULL &&
10141             cmd->duplex != DUPLEX_HALF)
10142                 return -EINVAL;
10143
10144         if (cmd->autoneg == AUTONEG_ENABLE) {
10145                 u32 mask = ADVERTISED_Autoneg |
10146                            ADVERTISED_Pause |
10147                            ADVERTISED_Asym_Pause;
10148
10149                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10150                         mask |= ADVERTISED_1000baseT_Half |
10151                                 ADVERTISED_1000baseT_Full;
10152
10153                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10154                         mask |= ADVERTISED_100baseT_Half |
10155                                 ADVERTISED_100baseT_Full |
10156                                 ADVERTISED_10baseT_Half |
10157                                 ADVERTISED_10baseT_Full |
10158                                 ADVERTISED_TP;
10159                 else
10160                         mask |= ADVERTISED_FIBRE;
10161
10162                 if (cmd->advertising & ~mask)
10163                         return -EINVAL;
10164
10165                 mask &= (ADVERTISED_1000baseT_Half |
10166                          ADVERTISED_1000baseT_Full |
10167                          ADVERTISED_100baseT_Half |
10168                          ADVERTISED_100baseT_Full |
10169                          ADVERTISED_10baseT_Half |
10170                          ADVERTISED_10baseT_Full);
10171
10172                 cmd->advertising &= mask;
10173         } else {
10174                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10175                         if (speed != SPEED_1000)
10176                                 return -EINVAL;
10177
10178                         if (cmd->duplex != DUPLEX_FULL)
10179                                 return -EINVAL;
10180                 } else {
10181                         if (speed != SPEED_100 &&
10182                             speed != SPEED_10)
10183                                 return -EINVAL;
10184                 }
10185         }
10186
10187         tg3_full_lock(tp, 0);
10188
10189         tp->link_config.autoneg = cmd->autoneg;
10190         if (cmd->autoneg == AUTONEG_ENABLE) {
10191                 tp->link_config.advertising = (cmd->advertising |
10192                                               ADVERTISED_Autoneg);
10193                 tp->link_config.speed = SPEED_INVALID;
10194                 tp->link_config.duplex = DUPLEX_INVALID;
10195         } else {
10196                 tp->link_config.advertising = 0;
10197                 tp->link_config.speed = speed;
10198                 tp->link_config.duplex = cmd->duplex;
10199         }
10200
10201         tp->link_config.orig_speed = tp->link_config.speed;
10202         tp->link_config.orig_duplex = tp->link_config.duplex;
10203         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10204
10205         if (netif_running(dev))
10206                 tg3_setup_phy(tp, 1);
10207
10208         tg3_full_unlock(tp);
10209
10210         return 0;
10211 }
10212
10213 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10214 {
10215         struct tg3 *tp = netdev_priv(dev);
10216
10217         strcpy(info->driver, DRV_MODULE_NAME);
10218         strcpy(info->version, DRV_MODULE_VERSION);
10219         strcpy(info->fw_version, tp->fw_ver);
10220         strcpy(info->bus_info, pci_name(tp->pdev));
10221 }
10222
10223 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10224 {
10225         struct tg3 *tp = netdev_priv(dev);
10226
10227         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10228                 wol->supported = WAKE_MAGIC;
10229         else
10230                 wol->supported = 0;
10231         wol->wolopts = 0;
10232         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10233                 wol->wolopts = WAKE_MAGIC;
10234         memset(&wol->sopass, 0, sizeof(wol->sopass));
10235 }
10236
10237 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10238 {
10239         struct tg3 *tp = netdev_priv(dev);
10240         struct device *dp = &tp->pdev->dev;
10241
10242         if (wol->wolopts & ~WAKE_MAGIC)
10243                 return -EINVAL;
10244         if ((wol->wolopts & WAKE_MAGIC) &&
10245             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10246                 return -EINVAL;
10247
10248         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10249
10250         spin_lock_bh(&tp->lock);
10251         if (device_may_wakeup(dp))
10252                 tg3_flag_set(tp, WOL_ENABLE);
10253         else
10254                 tg3_flag_clear(tp, WOL_ENABLE);
10255         spin_unlock_bh(&tp->lock);
10256
10257         return 0;
10258 }
10259
10260 static u32 tg3_get_msglevel(struct net_device *dev)
10261 {
10262         struct tg3 *tp = netdev_priv(dev);
10263         return tp->msg_enable;
10264 }
10265
10266 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10267 {
10268         struct tg3 *tp = netdev_priv(dev);
10269         tp->msg_enable = value;
10270 }
10271
10272 static int tg3_nway_reset(struct net_device *dev)
10273 {
10274         struct tg3 *tp = netdev_priv(dev);
10275         int r;
10276
10277         if (!netif_running(dev))
10278                 return -EAGAIN;
10279
10280         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10281                 return -EINVAL;
10282
10283         if (tg3_flag(tp, USE_PHYLIB)) {
10284                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10285                         return -EAGAIN;
10286                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10287         } else {
10288                 u32 bmcr;
10289
10290                 spin_lock_bh(&tp->lock);
10291                 r = -EINVAL;
10292                 tg3_readphy(tp, MII_BMCR, &bmcr);
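                /* The BMCR is read twice; the first result is
                 * discarded, apparently to flush a stale value before
                 * the one that is actually tested.
                 */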
10293                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10294                     ((bmcr & BMCR_ANENABLE) ||
10295                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10296                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10297                                                    BMCR_ANENABLE);
10298                         r = 0;
10299                 }
10300                 spin_unlock_bh(&tp->lock);
10301         }
10302
10303         return r;
10304 }
10305
10306 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10307 {
10308         struct tg3 *tp = netdev_priv(dev);
10309
10310         ering->rx_max_pending = tp->rx_std_ring_mask;
10311         ering->rx_mini_max_pending = 0;
10312         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10313                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10314         else
10315                 ering->rx_jumbo_max_pending = 0;
10316
10317         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10318
10319         ering->rx_pending = tp->rx_pending;
10320         ering->rx_mini_pending = 0;
10321         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10322                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10323         else
10324                 ering->rx_jumbo_pending = 0;
10325
10326         ering->tx_pending = tp->napi[0].tx_pending;
10327 }
10328
10329 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10330 {
10331         struct tg3 *tp = netdev_priv(dev);
10332         int i, irq_sync = 0, err = 0;
10333
10334         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10335             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10336             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10337             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10338             (tg3_flag(tp, TSO_BUG) &&
10339              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10340                 return -EINVAL;
10341
10342         if (netif_running(dev)) {
10343                 tg3_phy_stop(tp);
10344                 tg3_netif_stop(tp);
10345                 irq_sync = 1;
10346         }
10347
10348         tg3_full_lock(tp, irq_sync);
10349
10350         tp->rx_pending = ering->rx_pending;
10351
10352         if (tg3_flag(tp, MAX_RXPEND_64) &&
10353             tp->rx_pending > 63)
10354                 tp->rx_pending = 63;
10355         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10356
10357         for (i = 0; i < tp->irq_max; i++)
10358                 tp->napi[i].tx_pending = ering->tx_pending;
10359
10360         if (netif_running(dev)) {
10361                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10362                 err = tg3_restart_hw(tp, 1);
10363                 if (!err)
10364                         tg3_netif_start(tp);
10365         }
10366
10367         tg3_full_unlock(tp);
10368
10369         if (irq_sync && !err)
10370                 tg3_phy_start(tp);
10371
10372         return err;
10373 }
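/* Note on the bounds in tg3_set_ringparam() above: tx_pending must
 * exceed MAX_SKB_FRAGS so a maximally fragmented skb can fit in the
 * ring at all, and presumably three times that on TSO_BUG chips, where
 * tg3_tso_bug() resubmits one GSO packet as several smaller ones.
 */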
10374
10375 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10376 {
10377         struct tg3 *tp = netdev_priv(dev);
10378
10379         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10380
10381         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10382                 epause->rx_pause = 1;
10383         else
10384                 epause->rx_pause = 0;
10385
10386         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10387                 epause->tx_pause = 1;
10388         else
10389                 epause->tx_pause = 0;
10390 }
10391
10392 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10393 {
10394         struct tg3 *tp = netdev_priv(dev);
10395         int err = 0;
10396
10397         if (tg3_flag(tp, USE_PHYLIB)) {
10398                 u32 newadv;
10399                 struct phy_device *phydev;
10400
10401                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10402
10403                 if (!(phydev->supported & SUPPORTED_Pause) ||
10404                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10405                      (epause->rx_pause != epause->tx_pause)))
10406                         return -EINVAL;
10407
10408                 tp->link_config.flowctrl = 0;
10409                 if (epause->rx_pause) {
10410                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10411
10412                         if (epause->tx_pause) {
10413                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10414                                 newadv = ADVERTISED_Pause;
10415                         } else
10416                                 newadv = ADVERTISED_Pause |
10417                                          ADVERTISED_Asym_Pause;
10418                 } else if (epause->tx_pause) {
10419                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10420                         newadv = ADVERTISED_Asym_Pause;
10421                 } else
10422                         newadv = 0;
10423
10424                 if (epause->autoneg)
10425                         tg3_flag_set(tp, PAUSE_AUTONEG);
10426                 else
10427                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10428
10429                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10430                         u32 oldadv = phydev->advertising &
10431                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10432                         if (oldadv != newadv) {
10433                                 phydev->advertising &=
10434                                         ~(ADVERTISED_Pause |
10435                                           ADVERTISED_Asym_Pause);
10436                                 phydev->advertising |= newadv;
10437                                 if (phydev->autoneg) {
10438                                         /*
10439                                          * Always renegotiate the link to
10440                                          * inform our link partner of our
10441                                          * flow control settings, even if the
10442                                          * flow control is forced.  Let
10443                                          * tg3_adjust_link() do the final
10444                                          * flow control setup.
10445                                          */
10446                                         return phy_start_aneg(phydev);
10447                                 }
10448                         }
10449
10450                         if (!epause->autoneg)
10451                                 tg3_setup_flow_control(tp, 0, 0);
10452                 } else {
10453                         tp->link_config.orig_advertising &=
10454                                         ~(ADVERTISED_Pause |
10455                                           ADVERTISED_Asym_Pause);
10456                         tp->link_config.orig_advertising |= newadv;
10457                 }
10458         } else {
10459                 int irq_sync = 0;
10460
10461                 if (netif_running(dev)) {
10462                         tg3_netif_stop(tp);
10463                         irq_sync = 1;
10464                 }
10465
10466                 tg3_full_lock(tp, irq_sync);
10467
10468                 if (epause->autoneg)
10469                         tg3_flag_set(tp, PAUSE_AUTONEG);
10470                 else
10471                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10472                 if (epause->rx_pause)
10473                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10474                 else
10475                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10476                 if (epause->tx_pause)
10477                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10478                 else
10479                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10480
10481                 if (netif_running(dev)) {
10482                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10483                         err = tg3_restart_hw(tp, 1);
10484                         if (!err)
10485                                 tg3_netif_start(tp);
10486                 }
10487
10488                 tg3_full_unlock(tp);
10489         }
10490
10491         return err;
10492 }
10493
10494 static int tg3_get_sset_count(struct net_device *dev, int sset)
10495 {
10496         switch (sset) {
10497         case ETH_SS_TEST:
10498                 return TG3_NUM_TEST;
10499         case ETH_SS_STATS:
10500                 return TG3_NUM_STATS;
10501         default:
10502                 return -EOPNOTSUPP;
10503         }
10504 }
10505
10506 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10507 {
10508         switch (stringset) {
10509         case ETH_SS_STATS:
10510                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10511                 break;
10512         case ETH_SS_TEST:
10513                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10514                 break;
10515         default:
10516                 WARN_ON(1);     /* we need a WARN() */
10517                 break;
10518         }
10519 }
10520
10521 static int tg3_set_phys_id(struct net_device *dev,
10522                             enum ethtool_phys_id_state state)
10523 {
10524         struct tg3 *tp = netdev_priv(dev);
10525
10526         if (!netif_running(tp->dev))
10527                 return -EAGAIN;
10528
10529         switch (state) {
10530         case ETHTOOL_ID_ACTIVE:
10531                 return 1;       /* cycle on/off once per second */
10532
10533         case ETHTOOL_ID_ON:
10534                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10535                      LED_CTRL_1000MBPS_ON |
10536                      LED_CTRL_100MBPS_ON |
10537                      LED_CTRL_10MBPS_ON |
10538                      LED_CTRL_TRAFFIC_OVERRIDE |
10539                      LED_CTRL_TRAFFIC_BLINK |
10540                      LED_CTRL_TRAFFIC_LED);
10541                 break;
10542
10543         case ETHTOOL_ID_OFF:
10544                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10545                      LED_CTRL_TRAFFIC_OVERRIDE);
10546                 break;
10547
10548         case ETHTOOL_ID_INACTIVE:
10549                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10550                 break;
10551         }
10552
10553         return 0;
10554 }
10555
10556 static void tg3_get_ethtool_stats(struct net_device *dev,
10557                                    struct ethtool_stats *estats, u64 *tmp_stats)
10558 {
10559         struct tg3 *tp = netdev_priv(dev);
10560         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10561 }
10562
10563 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10564 {
10565         int i;
10566         __be32 *buf;
10567         u32 offset = 0, len = 0;
10568         u32 magic, val;
10569
10570         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10571                 return NULL;
10572
10573         if (magic == TG3_EEPROM_MAGIC) {
10574                 for (offset = TG3_NVM_DIR_START;
10575                      offset < TG3_NVM_DIR_END;
10576                      offset += TG3_NVM_DIRENT_SIZE) {
10577                         if (tg3_nvram_read(tp, offset, &val))
10578                                 return NULL;
10579
10580                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10581                             TG3_NVM_DIRTYPE_EXTVPD)
10582                                 break;
10583                 }
10584
10585                 if (offset != TG3_NVM_DIR_END) {
10586                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10587                         if (tg3_nvram_read(tp, offset + 4, &offset))
10588                                 return NULL;
10589
10590                         offset = tg3_nvram_logical_addr(tp, offset);
10591                 }
10592         }
10593
10594         if (!offset || !len) {
10595                 offset = TG3_NVM_VPD_OFF;
10596                 len = TG3_NVM_VPD_LEN;
10597         }
10598
10599         buf = kmalloc(len, GFP_KERNEL);
10600         if (buf == NULL)
10601                 return NULL;
10602
10603         if (magic == TG3_EEPROM_MAGIC) {
10604                 for (i = 0; i < len; i += 4) {
10605                         /* The data is in little-endian format in NVRAM.
10606                          * Use the big-endian read routines to preserve
10607                          * the byte order as it exists in NVRAM.
10608                          */
10609                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10610                                 goto error;
10611                 }
10612         } else {
10613                 u8 *ptr;
10614                 ssize_t cnt;
10615                 unsigned int pos = 0;
10616
10617                 ptr = (u8 *)&buf[0];
10618                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10619                         cnt = pci_read_vpd(tp->pdev, pos,
10620                                            len - pos, ptr);
10621                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10622                                 cnt = 0;
10623                         else if (cnt < 0)
10624                                 goto error;
10625                 }
10626                 if (pos != len)
10627                         goto error;
10628         }
10629
10630         return buf;
10631
10632 error:
10633         kfree(buf);
10634         return NULL;
10635 }
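/* tg3_vpd_readblock() above prefers the extended-VPD block named in the
 * NVRAM directory, falling back to the fixed TG3_NVM_VPD_OFF window;
 * when the NVRAM does not carry TG3_EEPROM_MAGIC it pulls the data
 * through the PCI VPD capability instead, retrying pci_read_vpd() up to
 * three times on timeout or signal.
 */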
10636
10637 #define NVRAM_TEST_SIZE 0x100
10638 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10639 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10640 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10641 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10642 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10643 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x4c
10644 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10645 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10646
10647 static int tg3_test_nvram(struct tg3 *tp)
10648 {
10649         u32 csum, magic;
10650         __be32 *buf;
10651         int i, j, k, err = 0, size;
10652
10653         if (tg3_flag(tp, NO_NVRAM))
10654                 return 0;
10655
10656         if (tg3_nvram_read(tp, 0, &magic) != 0)
10657                 return -EIO;
10658
10659         if (magic == TG3_EEPROM_MAGIC)
10660                 size = NVRAM_TEST_SIZE;
10661         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10662                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10663                     TG3_EEPROM_SB_FORMAT_1) {
10664                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10665                         case TG3_EEPROM_SB_REVISION_0:
10666                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10667                                 break;
10668                         case TG3_EEPROM_SB_REVISION_2:
10669                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10670                                 break;
10671                         case TG3_EEPROM_SB_REVISION_3:
10672                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10673                                 break;
10674                         case TG3_EEPROM_SB_REVISION_4:
10675                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10676                                 break;
10677                         case TG3_EEPROM_SB_REVISION_5:
10678                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10679                                 break;
10680                         case TG3_EEPROM_SB_REVISION_6:
10681                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10682                                 break;
10683                         default:
10684                                 return -EIO;
10685                         }
10686                 } else
10687                         return 0;
10688         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10689                 size = NVRAM_SELFBOOT_HW_SIZE;
10690         else
10691                 return -EIO;
10692
10693         buf = kmalloc(size, GFP_KERNEL);
10694         if (buf == NULL)
10695                 return -ENOMEM;
10696
10697         err = -EIO;
10698         for (i = 0, j = 0; i < size; i += 4, j++) {
10699                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10700                 if (err)
10701                         break;
10702         }
10703         if (i < size)
10704                 goto out;
10705
10706         /* Selfboot format */
10707         magic = be32_to_cpu(buf[0]);
10708         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10709             TG3_EEPROM_MAGIC_FW) {
10710                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10711
10712                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10713                     TG3_EEPROM_SB_REVISION_2) {
10714                         /* For rev 2, the csum doesn't include the MBA. */
10715                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10716                                 csum8 += buf8[i];
10717                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10718                                 csum8 += buf8[i];
10719                 } else {
10720                         for (i = 0; i < size; i++)
10721                                 csum8 += buf8[i];
10722                 }
10723
10724                 if (csum8 == 0) {
10725                         err = 0;
10726                         goto out;
10727                 }
10728
10729                 err = -EIO;
10730                 goto out;
10731         }
10732
10733         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10734             TG3_EEPROM_MAGIC_HW) {
10735                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10736                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10737                 u8 *buf8 = (u8 *) buf;
10738
10739                 /* Separate the parity bits and the data bytes.  */
10740                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10741                         if ((i == 0) || (i == 8)) {
10742                                 int l;
10743                                 u8 msk;
10744
10745                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10746                                         parity[k++] = buf8[i] & msk;
10747                                 i++;
10748                         } else if (i == 16) {
10749                                 int l;
10750                                 u8 msk;
10751
10752                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10753                                         parity[k++] = buf8[i] & msk;
10754                                 i++;
10755
10756                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10757                                         parity[k++] = buf8[i] & msk;
10758                                 i++;
10759                         }
10760                         data[j++] = buf8[i];
10761                 }
10762
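                /* A data byte and its parity bit must together have odd
                 * parity: bytes with an odd number of set bits pass only
                 * with the parity bit clear, even ones only with it set.
                 */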
10763                 err = -EIO;
10764                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10765                         u8 hw8 = hweight8(data[i]);
10766
10767                         if ((hw8 & 0x1) && parity[i])
10768                                 goto out;
10769                         else if (!(hw8 & 0x1) && !parity[i])
10770                                 goto out;
10771                 }
10772                 err = 0;
10773                 goto out;
10774         }
10775
10776         err = -EIO;
10777
10778         /* Bootstrap checksum at offset 0x10 */
10779         csum = calc_crc((unsigned char *) buf, 0x10);
10780         if (csum != le32_to_cpu(buf[0x10/4]))
10781                 goto out;
10782
10783         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10784         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10785         if (csum != le32_to_cpu(buf[0xfc/4]))
10786                 goto out;
10787
10788         kfree(buf);
10789
10790         buf = tg3_vpd_readblock(tp);
10791         if (!buf)
10792                 return -ENOMEM;
10793
10794         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10795                              PCI_VPD_LRDT_RO_DATA);
10796         if (i > 0) {
10797                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10798                 if (j < 0)
10799                         goto out;
10800
10801                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10802                         goto out;
10803
10804                 i += PCI_VPD_LRDT_TAG_SIZE;
10805                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10806                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10807                 if (j > 0) {
10808                         u8 csum8 = 0;
10809
10810                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10811
10812                         for (i = 0; i <= j; i++)
10813                                 csum8 += ((u8 *)buf)[i];
10814
10815                         if (csum8)
10816                                 goto out;
10817                 }
10818         }
10819
10820         err = 0;
10821
10822 out:
10823         kfree(buf);
10824         return err;
10825 }
10826
10827 #define TG3_SERDES_TIMEOUT_SEC  2
10828 #define TG3_COPPER_TIMEOUT_SEC  6
10829
10830 static int tg3_test_link(struct tg3 *tp)
10831 {
10832         int i, max;
10833
10834         if (!netif_running(tp->dev))
10835                 return -ENODEV;
10836
10837         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10838                 max = TG3_SERDES_TIMEOUT_SEC;
10839         else
10840                 max = TG3_COPPER_TIMEOUT_SEC;
10841
10842         for (i = 0; i < max; i++) {
10843                 if (netif_carrier_ok(tp->dev))
10844                         return 0;
10845
10846                 if (msleep_interruptible(1000))
10847                         break;
10848         }
10849
10850         return -EIO;
10851 }
10852
10853 /* Only test the commonly used registers */
10854 static int tg3_test_registers(struct tg3 *tp)
10855 {
10856         int i, is_5705, is_5750;
10857         u32 offset, read_mask, write_mask, val, save_val, read_val;
10858         static struct {
10859                 u16 offset;
10860                 u16 flags;
10861 #define TG3_FL_5705     0x1
10862 #define TG3_FL_NOT_5705 0x2
10863 #define TG3_FL_NOT_5788 0x4
10864 #define TG3_FL_NOT_5750 0x8
10865                 u32 read_mask;
10866                 u32 write_mask;
10867         } reg_tbl[] = {
10868                 /* MAC Control Registers */
10869                 { MAC_MODE, TG3_FL_NOT_5705,
10870                         0x00000000, 0x00ef6f8c },
10871                 { MAC_MODE, TG3_FL_5705,
10872                         0x00000000, 0x01ef6b8c },
10873                 { MAC_STATUS, TG3_FL_NOT_5705,
10874                         0x03800107, 0x00000000 },
10875                 { MAC_STATUS, TG3_FL_5705,
10876                         0x03800100, 0x00000000 },
10877                 { MAC_ADDR_0_HIGH, 0x0000,
10878                         0x00000000, 0x0000ffff },
10879                 { MAC_ADDR_0_LOW, 0x0000,
10880                         0x00000000, 0xffffffff },
10881                 { MAC_RX_MTU_SIZE, 0x0000,
10882                         0x00000000, 0x0000ffff },
10883                 { MAC_TX_MODE, 0x0000,
10884                         0x00000000, 0x00000070 },
10885                 { MAC_TX_LENGTHS, 0x0000,
10886                         0x00000000, 0x00003fff },
10887                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10888                         0x00000000, 0x000007fc },
10889                 { MAC_RX_MODE, TG3_FL_5705,
10890                         0x00000000, 0x000007dc },
10891                 { MAC_HASH_REG_0, 0x0000,
10892                         0x00000000, 0xffffffff },
10893                 { MAC_HASH_REG_1, 0x0000,
10894                         0x00000000, 0xffffffff },
10895                 { MAC_HASH_REG_2, 0x0000,
10896                         0x00000000, 0xffffffff },
10897                 { MAC_HASH_REG_3, 0x0000,
10898                         0x00000000, 0xffffffff },
10899
10900                 /* Receive Data and Receive BD Initiator Control Registers. */
10901                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10902                         0x00000000, 0xffffffff },
10903                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10904                         0x00000000, 0xffffffff },
10905                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10906                         0x00000000, 0x00000003 },
10907                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10908                         0x00000000, 0xffffffff },
10909                 { RCVDBDI_STD_BD+0, 0x0000,
10910                         0x00000000, 0xffffffff },
10911                 { RCVDBDI_STD_BD+4, 0x0000,
10912                         0x00000000, 0xffffffff },
10913                 { RCVDBDI_STD_BD+8, 0x0000,
10914                         0x00000000, 0xffff0002 },
10915                 { RCVDBDI_STD_BD+0xc, 0x0000,
10916                         0x00000000, 0xffffffff },
10917
10918                 /* Receive BD Initiator Control Registers. */
10919                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10920                         0x00000000, 0xffffffff },
10921                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10922                         0x00000000, 0x000003ff },
10923                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10924                         0x00000000, 0xffffffff },
10925
10926                 /* Host Coalescing Control Registers. */
10927                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10928                         0x00000000, 0x00000004 },
10929                 { HOSTCC_MODE, TG3_FL_5705,
10930                         0x00000000, 0x000000f6 },
10931                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10932                         0x00000000, 0xffffffff },
10933                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10934                         0x00000000, 0x000003ff },
10935                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10936                         0x00000000, 0xffffffff },
10937                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10938                         0x00000000, 0x000003ff },
10939                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10940                         0x00000000, 0xffffffff },
10941                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10942                         0x00000000, 0x000000ff },
10943                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10944                         0x00000000, 0xffffffff },
10945                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10946                         0x00000000, 0x000000ff },
10947                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10948                         0x00000000, 0xffffffff },
10949                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10950                         0x00000000, 0xffffffff },
10951                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10952                         0x00000000, 0xffffffff },
10953                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10954                         0x00000000, 0x000000ff },
10955                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10956                         0x00000000, 0xffffffff },
10957                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10958                         0x00000000, 0x000000ff },
10959                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10960                         0x00000000, 0xffffffff },
10961                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10962                         0x00000000, 0xffffffff },
10963                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10964                         0x00000000, 0xffffffff },
10965                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10966                         0x00000000, 0xffffffff },
10967                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10968                         0x00000000, 0xffffffff },
10969                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10970                         0xffffffff, 0x00000000 },
10971                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10972                         0xffffffff, 0x00000000 },
10973
10974                 /* Buffer Manager Control Registers. */
10975                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10976                         0x00000000, 0x007fff80 },
10977                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10978                         0x00000000, 0x007fffff },
10979                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10980                         0x00000000, 0x0000003f },
10981                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10982                         0x00000000, 0x000001ff },
10983                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10984                         0x00000000, 0x000001ff },
10985                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10986                         0xffffffff, 0x00000000 },
10987                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10988                         0xffffffff, 0x00000000 },
10989
10990                 /* Mailbox Registers */
10991                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10992                         0x00000000, 0x000001ff },
10993                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10994                         0x00000000, 0x000001ff },
10995                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10996                         0x00000000, 0x000007ff },
10997                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10998                         0x00000000, 0x000001ff },
10999
11000                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11001         };
11002
11003         is_5705 = is_5750 = 0;
11004         if (tg3_flag(tp, 5705_PLUS)) {
11005                 is_5705 = 1;
11006                 if (tg3_flag(tp, 5750_PLUS))
11007                         is_5750 = 1;
11008         }
11009
11010         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11011                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11012                         continue;
11013
11014                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11015                         continue;
11016
11017                 if (tg3_flag(tp, IS_5788) &&
11018                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11019                         continue;
11020
11021                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11022                         continue;
11023
11024                 offset = (u32) reg_tbl[i].offset;
11025                 read_mask = reg_tbl[i].read_mask;
11026                 write_mask = reg_tbl[i].write_mask;
11027
11028                 /* Save the original register content */
11029                 save_val = tr32(offset);
11030
11031                 /* Determine the read-only value. */
11032                 read_val = save_val & read_mask;
11033
11034                 /* Write zero to the register, then make sure the read-only bits
11035                  * are not changed and the read/write bits are all zeros.
11036                  */
11037                 tw32(offset, 0);
11038
11039                 val = tr32(offset);
11040
11041                 /* Test the read-only and read/write bits. */
11042                 if (((val & read_mask) != read_val) || (val & write_mask))
11043                         goto out;
11044
11045                 /* Write ones to all the bits defined by RdMask and WrMask, then
11046                  * make sure the read-only bits are not changed and the
11047                  * read/write bits are all ones.
11048                  */
11049                 tw32(offset, read_mask | write_mask);
11050
11051                 val = tr32(offset);
11052
11053                 /* Test the read-only bits. */
11054                 if ((val & read_mask) != read_val)
11055                         goto out;
11056
11057                 /* Test the read/write bits. */
11058                 if ((val & write_mask) != write_mask)
11059                         goto out;
11060
11061                 tw32(offset, save_val);
11062         }
11063
11064         return 0;
11065
11066 out:
11067         if (netif_msg_hw(tp))
11068                 netdev_err(tp->dev,
11069                            "Register test failed at offset %x\n", offset);
11070         tw32(offset, save_val);
11071         return -EIO;
11072 }
11073
11074 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11075 {
11076         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11077         int i;
11078         u32 j;
11079
11080         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11081                 for (j = 0; j < len; j += 4) {
11082                         u32 val;
11083
11084                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11085                         tg3_read_mem(tp, offset + j, &val);
11086                         if (val != test_pattern[i])
11087                                 return -EIO;
11088                 }
11089         }
11090         return 0;
11091 }
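/* tg3_do_mem_test() above walks every word of the window through
 * all-zeros, all-ones and the mixed pattern 0xaa55a55a, reading each
 * word back immediately after writing it.
 */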
11092
11093 static int tg3_test_memory(struct tg3 *tp)
11094 {
11095         static struct mem_entry {
11096                 u32 offset;
11097                 u32 len;
11098         } mem_tbl_570x[] = {
11099                 { 0x00000000, 0x00b50},
11100                 { 0x00002000, 0x1c000},
11101                 { 0xffffffff, 0x00000}
11102         }, mem_tbl_5705[] = {
11103                 { 0x00000100, 0x0000c},
11104                 { 0x00000200, 0x00008},
11105                 { 0x00004000, 0x00800},
11106                 { 0x00006000, 0x01000},
11107                 { 0x00008000, 0x02000},
11108                 { 0x00010000, 0x0e000},
11109                 { 0xffffffff, 0x00000}
11110         }, mem_tbl_5755[] = {
11111                 { 0x00000200, 0x00008},
11112                 { 0x00004000, 0x00800},
11113                 { 0x00006000, 0x00800},
11114                 { 0x00008000, 0x02000},
11115                 { 0x00010000, 0x0c000},
11116                 { 0xffffffff, 0x00000}
11117         }, mem_tbl_5906[] = {
11118                 { 0x00000200, 0x00008},
11119                 { 0x00004000, 0x00400},
11120                 { 0x00006000, 0x00400},
11121                 { 0x00008000, 0x01000},
11122                 { 0x00010000, 0x01000},
11123                 { 0xffffffff, 0x00000}
11124         }, mem_tbl_5717[] = {
11125                 { 0x00000200, 0x00008},
11126                 { 0x00010000, 0x0a000},
11127                 { 0x00020000, 0x13c00},
11128                 { 0xffffffff, 0x00000}
11129         }, mem_tbl_57765[] = {
11130                 { 0x00000200, 0x00008},
11131                 { 0x00004000, 0x00800},
11132                 { 0x00006000, 0x09800},
11133                 { 0x00010000, 0x0a000},
11134                 { 0xffffffff, 0x00000}
11135         };
11136         struct mem_entry *mem_tbl;
11137         int err = 0;
11138         int i;
11139
11140         if (tg3_flag(tp, 5717_PLUS))
11141                 mem_tbl = mem_tbl_5717;
11142         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11143                 mem_tbl = mem_tbl_57765;
11144         else if (tg3_flag(tp, 5755_PLUS))
11145                 mem_tbl = mem_tbl_5755;
11146         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11147                 mem_tbl = mem_tbl_5906;
11148         else if (tg3_flag(tp, 5705_PLUS))
11149                 mem_tbl = mem_tbl_5705;
11150         else
11151                 mem_tbl = mem_tbl_570x;
11152
11153         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11154                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11155                 if (err)
11156                         break;
11157         }
11158
11159         return err;
11160 }
11161
11162 #define TG3_MAC_LOOPBACK        0
11163 #define TG3_PHY_LOOPBACK        1
11164 #define TG3_TSO_LOOPBACK        2
11165
11166 #define TG3_TSO_MSS             500
11167
11168 #define TG3_TSO_IP_HDR_LEN      20
11169 #define TG3_TSO_TCP_HDR_LEN     20
11170 #define TG3_TSO_TCP_OPT_LEN     12
11171
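      /* Template for the TSO loopback frame, starting at the Ethertype:
       * 0x0800 (IPv4), a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2,
       * protocol TCP), then a 20-byte TCP header plus 12 bytes of
       * options (two NOPs and a timestamp option).  The IP total length
       * is patched in at run time, and the TCP checksum is cleared when
       * the hardware computes it.
       */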
11172 static const u8 tg3_tso_header[] = {
11173 0x08, 0x00,
11174 0x45, 0x00, 0x00, 0x00,
11175 0x00, 0x00, 0x40, 0x00,
11176 0x40, 0x06, 0x00, 0x00,
11177 0x0a, 0x00, 0x00, 0x01,
11178 0x0a, 0x00, 0x00, 0x02,
11179 0x0d, 0x00, 0xe0, 0x00,
11180 0x00, 0x00, 0x01, 0x00,
11181 0x00, 0x00, 0x02, 0x00,
11182 0x80, 0x10, 0x10, 0x00,
11183 0x14, 0x09, 0x00, 0x00,
11184 0x01, 0x01, 0x08, 0x0a,
11185 0x11, 0x11, 0x11, 0x11,
11186 0x11, 0x11, 0x11, 0x11,
11187 };
11188
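      /* Core of the loopback self-test: configure MAC- or PHY-level
       * loopback, build a single test frame (or a multi-segment TSO
       * frame), post it on the tx ring, poll the status block until the
       * frame comes back, and verify every payload byte on the rx side.
       * Returns 0 on success, -EIO or -ENOMEM on failure.
       */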
11189 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11190 {
11191         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11192         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11193         struct sk_buff *skb, *rx_skb;
11194         u8 *tx_data;
11195         dma_addr_t map;
11196         int num_pkts, tx_len, rx_len, i, err;
11197         struct tg3_rx_buffer_desc *desc;
11198         struct tg3_napi *tnapi, *rnapi;
11199         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11200
11201         tnapi = &tp->napi[0];
11202         rnapi = &tp->napi[0];
11203         if (tp->irq_cnt > 1) {
11204                 if (tg3_flag(tp, ENABLE_RSS))
11205                         rnapi = &tp->napi[1];
11206                 if (tg3_flag(tp, ENABLE_TSS))
11207                         tnapi = &tp->napi[1];
11208         }
11209         coal_now = tnapi->coal_now | rnapi->coal_now;
11210
11211         if (loopback_mode == TG3_MAC_LOOPBACK) {
11212                 /* HW errata - MAC loopback fails in some cases on 5780.
11213                  * Normal traffic and PHY loopback are not affected by
11214                  * the errata.  Also, the MAC loopback test is deprecated
11215                  * for all newer ASIC revisions.
11216                  */
11217                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11218                     tg3_flag(tp, CPMU_PRESENT))
11219                         return 0;
11220
11221                 mac_mode = tp->mac_mode &
11222                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11223                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11224                 if (!tg3_flag(tp, 5705_PLUS))
11225                         mac_mode |= MAC_MODE_LINK_POLARITY;
11226                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11227                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11228                 else
11229                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11230                 tw32(MAC_MODE, mac_mode);
11231         } else {
11232                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11233                         tg3_phy_fet_toggle_apd(tp, false);
11234                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11235                 } else
11236                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11237
11238                 tg3_phy_toggle_automdix(tp, 0);
11239
11240                 tg3_writephy(tp, MII_BMCR, val);
11241                 udelay(40);
11242
11243                 mac_mode = tp->mac_mode &
11244                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11245                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11246                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11247                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11248                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11249                         /* The write needs to be flushed for the AC131 */
11250                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11251                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11252                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11253                 } else
11254                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11255
11256                 /* Reset the RX MAC to avoid intermittently losing the 1st rx packet */
11257                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11258                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11259                         udelay(10);
11260                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11261                 }
11262                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11263                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11264                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11265                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11266                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11267                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11268                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11269                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11270                 }
11271                 tw32(MAC_MODE, mac_mode);
11272
11273                 /* Wait for link */
11274                 for (i = 0; i < 100; i++) {
11275                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11276                                 break;
11277                         mdelay(1);
11278                 }
11279         }
11280
11281         err = -EIO;
11282
11283         tx_len = pktsz;
11284         skb = netdev_alloc_skb(tp->dev, tx_len);
11285         if (!skb)
11286                 return -ENOMEM;
11287
11288         tx_data = skb_put(skb, tx_len);
11289         memcpy(tx_data, tp->dev->dev_addr, 6);
11290         memset(tx_data + 6, 0x0, 8);
11291
11292         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11293
11294         if (loopback_mode == TG3_TSO_LOOPBACK) {
11295                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11296
11297                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11298                               TG3_TSO_TCP_OPT_LEN;
11299
11300                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11301                        sizeof(tg3_tso_header));
11302                 mss = TG3_TSO_MSS;
11303
11304                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11305                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11306
11307                 /* Set the total length field in the IP header */
11308                 iph->tot_len = htons((u16)(mss + hdr_len));
11309
11310                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11311                               TXD_FLAG_CPU_POST_DMA);
11312
11313                 if (tg3_flag(tp, HW_TSO_1) ||
11314                     tg3_flag(tp, HW_TSO_2) ||
11315                     tg3_flag(tp, HW_TSO_3)) {
11316                         struct tcphdr *th;
11317                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11318                         th = (struct tcphdr *)&tx_data[val];
11319                         th->check = 0;
11320                 } else
11321                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11322
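                      /* The header length is encoded differently per TSO
                       * hardware generation: HW_TSO_3 splits it between
                       * the mss field and base_flags, HW_TSO_2 packs it
                       * into the upper mss bits, and older parts encode
                       * only the TCP option length.
                       */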
11323                 if (tg3_flag(tp, HW_TSO_3)) {
11324                         mss |= (hdr_len & 0xc) << 12;
11325                         if (hdr_len & 0x10)
11326                                 base_flags |= 0x00000010;
11327                         base_flags |= (hdr_len & 0x3e0) << 5;
11328                 } else if (tg3_flag(tp, HW_TSO_2))
11329                         mss |= hdr_len << 9;
11330                 else if (tg3_flag(tp, HW_TSO_1) ||
11331                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11332                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11333                 } else {
11334                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11335                 }
11336
11337                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11338         } else {
11339                 num_pkts = 1;
11340                 data_off = ETH_HLEN;
11341         }
11342
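              /* Fill the payload with an incrementing byte pattern so
               * the receive path below can verify the data arrived
               * intact.
               */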
11343         for (i = data_off; i < tx_len; i++)
11344                 tx_data[i] = (u8) (i & 0xff);
11345
11346         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11347         if (pci_dma_mapping_error(tp->pdev, map)) {
11348                 dev_kfree_skb(skb);
11349                 return -EIO;
11350         }
11351
11352         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11353                rnapi->coal_now);
11354
11355         udelay(10);
11356
11357         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11358
11359         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11360                     base_flags, (mss << 1) | 1);
11361
11362         tnapi->tx_prod++;
11363
11364         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11365         tr32_mailbox(tnapi->prodmbox);
11366
11367         udelay(10);
11368
11369         /* Poll for up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices.  */
11370         for (i = 0; i < 35; i++) {
11371                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11372                        coal_now);
11373
11374                 udelay(10);
11375
11376                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11377                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11378                 if ((tx_idx == tnapi->tx_prod) &&
11379                     (rx_idx == (rx_start_idx + num_pkts)))
11380                         break;
11381         }
11382
11383         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11384         dev_kfree_skb(skb);
11385
11386         if (tx_idx != tnapi->tx_prod)
11387                 goto out;
11388
11389         if (rx_idx != rx_start_idx + num_pkts)
11390                 goto out;
11391
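              /* Walk the rx return ring entries produced by this test
               * and validate each one: error bits, reported length,
               * source ring, checksum status for TSO, and finally the
               * payload pattern itself.
               */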
11392         val = data_off;
11393         while (rx_idx != rx_start_idx) {
11394                 desc = &rnapi->rx_rcb[rx_start_idx++];
11395                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11396                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11397
11398                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11399                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11400                         goto out;
11401
11402                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11403                          - ETH_FCS_LEN;
11404
11405                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11406                         if (rx_len != tx_len)
11407                                 goto out;
11408
11409                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11410                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11411                                         goto out;
11412                         } else {
11413                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11414                                         goto out;
11415                         }
11416                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11417                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11418                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11419                         goto out;
11420                 }
11421
11422                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11423                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11424                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11425                                              mapping);
11426                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11427                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11428                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11429                                              mapping);
11430                 } else
11431                         goto out;
11432
11433                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11434                                             PCI_DMA_FROMDEVICE);
11435
11436                 for (i = data_off; i < rx_len; i++, val++) {
11437                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11438                                 goto out;
11439                 }
11440         }
11441
11442         err = 0;
11443
11444         /* tg3_free_rings will unmap and free the rx_skb */
11445 out:
11446         return err;
11447 }
11448
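      /* Loopback results are reported as a bitmap: bits 0-2 hold the
       * standard/jumbo/TSO failures for MAC loopback, bits 4-6 the same
       * failures for PHY loopback, so 0x77 means everything failed.
       */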
11449 #define TG3_STD_LOOPBACK_FAILED         1
11450 #define TG3_JMB_LOOPBACK_FAILED         2
11451 #define TG3_TSO_LOOPBACK_FAILED         4
11452
11453 #define TG3_MAC_LOOPBACK_SHIFT          0
11454 #define TG3_PHY_LOOPBACK_SHIFT          4
11455 #define TG3_LOOPBACK_FAILED             0x00000077
11456
11457 static int tg3_test_loopback(struct tg3 *tp)
11458 {
11459         int err = 0;
11460         u32 eee_cap, cpmuctrl = 0;
11461
11462         if (!netif_running(tp->dev))
11463                 return TG3_LOOPBACK_FAILED;
11464
11465         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11466         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11467
11468         err = tg3_reset_hw(tp, 1);
11469         if (err) {
11470                 err = TG3_LOOPBACK_FAILED;
11471                 goto done;
11472         }
11473
11474         if (tg3_flag(tp, ENABLE_RSS)) {
11475                 int i;
11476
11477                 /* Reroute all rx packets to the 1st queue */
11478                 for (i = MAC_RSS_INDIR_TBL_0;
11479                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11480                         tw32(i, 0x0);
11481         }
11482
11483         /* Turn off gphy autopowerdown. */
11484         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11485                 tg3_phy_toggle_apd(tp, false);
11486
11487         if (tg3_flag(tp, CPMU_PRESENT)) {
11488                 int i;
11489                 u32 status;
11490
11491                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11492
11493                 /* Wait for up to 40 microseconds to acquire lock. */
11494                 for (i = 0; i < 4; i++) {
11495                         status = tr32(TG3_CPMU_MUTEX_GNT);
11496                         if (status == CPMU_MUTEX_GNT_DRIVER)
11497                                 break;
11498                         udelay(10);
11499                 }
11500
11501                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11502                         err = TG3_LOOPBACK_FAILED;
11503                         goto done;
11504                 }
11505
11506                 /* Turn off link-based power management. */
11507                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11508                 tw32(TG3_CPMU_CTRL,
11509                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11510                                   CPMU_CTRL_LINK_AWARE_MODE));
11511         }
11512
11513         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11514                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11515
11516         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11517             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11518                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11519
11520         if (tg3_flag(tp, CPMU_PRESENT)) {
11521                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11522
11523                 /* Release the mutex */
11524                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11525         }
11526
11527         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11528             !tg3_flag(tp, USE_PHYLIB)) {
11529                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11530                         err |= TG3_STD_LOOPBACK_FAILED <<
11531                                TG3_PHY_LOOPBACK_SHIFT;
11532                 if (tg3_flag(tp, TSO_CAPABLE) &&
11533                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11534                         err |= TG3_TSO_LOOPBACK_FAILED <<
11535                                TG3_PHY_LOOPBACK_SHIFT;
11536                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11537                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11538                         err |= TG3_JMB_LOOPBACK_FAILED <<
11539                                TG3_PHY_LOOPBACK_SHIFT;
11540         }
11541
11542         /* Re-enable gphy autopowerdown. */
11543         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11544                 tg3_phy_toggle_apd(tp, true);
11545
11546 done:
11547         tp->phy_flags |= eee_cap;
11548
11549         return err;
11550 }
11551
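      /* ethtool self-test entry point.  Results land in data[]:
       * 0 = nvram, 1 = link, 2 = registers, 3 = memory, 4 = loopback,
       * 5 = interrupt; the offline tests (2-5) require halting the chip.
       */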
11552 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11553                           u64 *data)
11554 {
11555         struct tg3 *tp = netdev_priv(dev);
11556
11557         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11558             tg3_power_up(tp)) {
11559                 etest->flags |= ETH_TEST_FL_FAILED;
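                      /* Any nonzero value marks a test as failed; this
                       * memset() writes 0x01 into every byte of each u64.
                       */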
11560                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11561                 return;
11562         }
11563
11564         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11565
11566         if (tg3_test_nvram(tp) != 0) {
11567                 etest->flags |= ETH_TEST_FL_FAILED;
11568                 data[0] = 1;
11569         }
11570         if (tg3_test_link(tp) != 0) {
11571                 etest->flags |= ETH_TEST_FL_FAILED;
11572                 data[1] = 1;
11573         }
11574         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11575                 int err, err2 = 0, irq_sync = 0;
11576
11577                 if (netif_running(dev)) {
11578                         tg3_phy_stop(tp);
11579                         tg3_netif_stop(tp);
11580                         irq_sync = 1;
11581                 }
11582
11583                 tg3_full_lock(tp, irq_sync);
11584
11585                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11586                 err = tg3_nvram_lock(tp);
11587                 tg3_halt_cpu(tp, RX_CPU_BASE);
11588                 if (!tg3_flag(tp, 5705_PLUS))
11589                         tg3_halt_cpu(tp, TX_CPU_BASE);
11590                 if (!err)
11591                         tg3_nvram_unlock(tp);
11592
11593                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11594                         tg3_phy_reset(tp);
11595
11596                 if (tg3_test_registers(tp) != 0) {
11597                         etest->flags |= ETH_TEST_FL_FAILED;
11598                         data[2] = 1;
11599                 }
11600                 if (tg3_test_memory(tp) != 0) {
11601                         etest->flags |= ETH_TEST_FL_FAILED;
11602                         data[3] = 1;
11603                 }
11604                 if ((data[4] = tg3_test_loopback(tp)) != 0)
11605                         etest->flags |= ETH_TEST_FL_FAILED;
11606
11607                 tg3_full_unlock(tp);
11608
11609                 if (tg3_test_interrupt(tp) != 0) {
11610                         etest->flags |= ETH_TEST_FL_FAILED;
11611                         data[5] = 1;
11612                 }
11613
11614                 tg3_full_lock(tp, 0);
11615
11616                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11617                 if (netif_running(dev)) {
11618                         tg3_flag_set(tp, INIT_COMPLETE);
11619                         err2 = tg3_restart_hw(tp, 1);
11620                         if (!err2)
11621                                 tg3_netif_start(tp);
11622                 }
11623
11624                 tg3_full_unlock(tp);
11625
11626                 if (irq_sync && !err2)
11627                         tg3_phy_start(tp);
11628         }
11629         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11630                 tg3_power_down(tp);
11631
11632 }
11633
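      /* Handle the MII register ioctls (SIOCGMIIPHY, SIOCGMIIREG,
       * SIOCSMIIREG).  When phylib manages the PHY, the request is
       * forwarded to phy_mii_ioctl(); serdes devices have no MII PHY
       * and fall through to -EOPNOTSUPP.
       */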
11634 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11635 {
11636         struct mii_ioctl_data *data = if_mii(ifr);
11637         struct tg3 *tp = netdev_priv(dev);
11638         int err;
11639
11640         if (tg3_flag(tp, USE_PHYLIB)) {
11641                 struct phy_device *phydev;
11642                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11643                         return -EAGAIN;
11644                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11645                 return phy_mii_ioctl(phydev, ifr, cmd);
11646         }
11647
11648         switch (cmd) {
11649         case SIOCGMIIPHY:
11650                 data->phy_id = tp->phy_addr;
11651
11652                 /* fallthru */
11653         case SIOCGMIIREG: {
11654                 u32 mii_regval;
11655
11656                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11657                         break;                  /* We have no PHY */
11658
11659                 if (!netif_running(dev))
11660                         return -EAGAIN;
11661
11662                 spin_lock_bh(&tp->lock);
11663                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11664                 spin_unlock_bh(&tp->lock);
11665
11666                 data->val_out = mii_regval;
11667
11668                 return err;
11669         }
11670
11671         case SIOCSMIIREG:
11672                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11673                         break;                  /* We have no PHY */
11674
11675                 if (!netif_running(dev))
11676                         return -EAGAIN;
11677
11678                 spin_lock_bh(&tp->lock);
11679                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11680                 spin_unlock_bh(&tp->lock);
11681
11682                 return err;
11683
11684         default:
11685                 /* do nothing */
11686                 break;
11687         }
11688         return -EOPNOTSUPP;
11689 }
11690
11691 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11692 {
11693         struct tg3 *tp = netdev_priv(dev);
11694
11695         memcpy(ec, &tp->coal, sizeof(*ec));
11696         return 0;
11697 }
11698
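      /* Validate the requested coalescing parameters against the
       * hardware limits (only pre-5705 parts support the _irq and
       * stats-block variants), copy the supported fields into tp->coal,
       * and program them immediately if the interface is running.
       */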
11699 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11700 {
11701         struct tg3 *tp = netdev_priv(dev);
11702         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11703         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11704
11705         if (!tg3_flag(tp, 5705_PLUS)) {
11706                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11707                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11708                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11709                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11710         }
11711
11712         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11713             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11714             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11715             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11716             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11717             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11718             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11719             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11720             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11721             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11722                 return -EINVAL;
11723
11724         /* No rx interrupts will be generated if both are zero */
11725         if ((ec->rx_coalesce_usecs == 0) &&
11726             (ec->rx_max_coalesced_frames == 0))
11727                 return -EINVAL;
11728
11729         /* No tx interrupts will be generated if both are zero */
11730         if ((ec->tx_coalesce_usecs == 0) &&
11731             (ec->tx_max_coalesced_frames == 0))
11732                 return -EINVAL;
11733
11734         /* Only copy relevant parameters, ignore all others. */
11735         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11736         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11737         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11738         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11739         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11740         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11741         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11742         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11743         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11744
11745         if (netif_running(dev)) {
11746                 tg3_full_lock(tp, 0);
11747                 __tg3_set_coalesce(tp, &tp->coal);
11748                 tg3_full_unlock(tp);
11749         }
11750         return 0;
11751 }
11752
11753 static const struct ethtool_ops tg3_ethtool_ops = {
11754         .get_settings           = tg3_get_settings,
11755         .set_settings           = tg3_set_settings,
11756         .get_drvinfo            = tg3_get_drvinfo,
11757         .get_regs_len           = tg3_get_regs_len,
11758         .get_regs               = tg3_get_regs,
11759         .get_wol                = tg3_get_wol,
11760         .set_wol                = tg3_set_wol,
11761         .get_msglevel           = tg3_get_msglevel,
11762         .set_msglevel           = tg3_set_msglevel,
11763         .nway_reset             = tg3_nway_reset,
11764         .get_link               = ethtool_op_get_link,
11765         .get_eeprom_len         = tg3_get_eeprom_len,
11766         .get_eeprom             = tg3_get_eeprom,
11767         .set_eeprom             = tg3_set_eeprom,
11768         .get_ringparam          = tg3_get_ringparam,
11769         .set_ringparam          = tg3_set_ringparam,
11770         .get_pauseparam         = tg3_get_pauseparam,
11771         .set_pauseparam         = tg3_set_pauseparam,
11772         .self_test              = tg3_self_test,
11773         .get_strings            = tg3_get_strings,
11774         .set_phys_id            = tg3_set_phys_id,
11775         .get_ethtool_stats      = tg3_get_ethtool_stats,
11776         .get_coalesce           = tg3_get_coalesce,
11777         .set_coalesce           = tg3_set_coalesce,
11778         .get_sset_count         = tg3_get_sset_count,
11779 };
11780
11781 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11782 {
11783         u32 cursize, val, magic;
11784
11785         tp->nvram_size = EEPROM_CHIP_SIZE;
11786
11787         if (tg3_nvram_read(tp, 0, &magic) != 0)
11788                 return;
11789
11790         if ((magic != TG3_EEPROM_MAGIC) &&
11791             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11792             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11793                 return;
11794
11795         /*
11796          * Size the chip by reading offsets at increasing powers of two.
11797          * When we encounter our validation signature, we know the addressing
11798          * has wrapped around, and thus have our chip size.
11799          */
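              /* For example, on a hypothetical 1 KB part: reads at 0x10
               * through 0x200 return ordinary data, the read at 0x400
               * wraps to offset 0 and returns the magic again, so
               * nvram_size becomes 0x400.
               */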
11800         cursize = 0x10;
11801
11802         while (cursize < tp->nvram_size) {
11803                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11804                         return;
11805
11806                 if (val == magic)
11807                         break;
11808
11809                 cursize <<= 1;
11810         }
11811
11812         tp->nvram_size = cursize;
11813 }
11814
11815 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11816 {
11817         u32 val;
11818
11819         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11820                 return;
11821
11822         /* Selfboot format */
11823         if (val != TG3_EEPROM_MAGIC) {
11824                 tg3_get_eeprom_size(tp);
11825                 return;
11826         }
11827
11828         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11829                 if (val != 0) {
11830                         /* This is confusing.  We want to operate on the
11831                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11832                          * call will read from NVRAM and byteswap the data
11833                          * according to the byteswapping settings for all
11834                          * other register accesses.  This ensures the data we
11835                          * want will always reside in the lower 16-bits.
11836                          * However, the data in NVRAM is in LE format, which
11837                          * means the data from the NVRAM read will always be
11838                          * opposite the endianness of the CPU.  The 16-bit
11839                          * byteswap then brings the data to CPU endianness.
11840                          */
11841                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11842                         return;
11843                 }
11844         }
11845         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11846 }
11847
11848 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11849 {
11850         u32 nvcfg1;
11851
11852         nvcfg1 = tr32(NVRAM_CFG1);
11853         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11854                 tg3_flag_set(tp, FLASH);
11855         } else {
11856                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11857                 tw32(NVRAM_CFG1, nvcfg1);
11858         }
11859
11860         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11861             tg3_flag(tp, 5780_CLASS)) {
11862                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11863                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11864                         tp->nvram_jedecnum = JEDEC_ATMEL;
11865                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11866                         tg3_flag_set(tp, NVRAM_BUFFERED);
11867                         break;
11868                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11869                         tp->nvram_jedecnum = JEDEC_ATMEL;
11870                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11871                         break;
11872                 case FLASH_VENDOR_ATMEL_EEPROM:
11873                         tp->nvram_jedecnum = JEDEC_ATMEL;
11874                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11875                         tg3_flag_set(tp, NVRAM_BUFFERED);
11876                         break;
11877                 case FLASH_VENDOR_ST:
11878                         tp->nvram_jedecnum = JEDEC_ST;
11879                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11880                         tg3_flag_set(tp, NVRAM_BUFFERED);
11881                         break;
11882                 case FLASH_VENDOR_SAIFUN:
11883                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11884                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11885                         break;
11886                 case FLASH_VENDOR_SST_SMALL:
11887                 case FLASH_VENDOR_SST_LARGE:
11888                         tp->nvram_jedecnum = JEDEC_SST;
11889                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11890                         break;
11891                 }
11892         } else {
11893                 tp->nvram_jedecnum = JEDEC_ATMEL;
11894                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11895                 tg3_flag_set(tp, NVRAM_BUFFERED);
11896         }
11897 }
11898
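      /* Decode the NVRAM_CFG1 page-size field into bytes.  264 and 528
       * are the Atmel DataFlash "power-of-two plus overhead" page sizes;
       * callers use them to decide whether address translation is needed.
       */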
11899 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11900 {
11901         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11902         case FLASH_5752PAGE_SIZE_256:
11903                 tp->nvram_pagesize = 256;
11904                 break;
11905         case FLASH_5752PAGE_SIZE_512:
11906                 tp->nvram_pagesize = 512;
11907                 break;
11908         case FLASH_5752PAGE_SIZE_1K:
11909                 tp->nvram_pagesize = 1024;
11910                 break;
11911         case FLASH_5752PAGE_SIZE_2K:
11912                 tp->nvram_pagesize = 2048;
11913                 break;
11914         case FLASH_5752PAGE_SIZE_4K:
11915                 tp->nvram_pagesize = 4096;
11916                 break;
11917         case FLASH_5752PAGE_SIZE_264:
11918                 tp->nvram_pagesize = 264;
11919                 break;
11920         case FLASH_5752PAGE_SIZE_528:
11921                 tp->nvram_pagesize = 528;
11922                 break;
11923         }
11924 }
11925
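      /* The tg3_get_*_nvram_info() helpers below all follow the same
       * pattern: read NVRAM_CFG1, decode the vendor/device bits into a
       * JEDEC vendor id, page size, and (where known) total size, and
       * set the BUFFERED/FLASH/PROTECTED_NVRAM flags accordingly.
       */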
11926 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11927 {
11928         u32 nvcfg1;
11929
11930         nvcfg1 = tr32(NVRAM_CFG1);
11931
11932         /* NVRAM protection for TPM */
11933         if (nvcfg1 & (1 << 27))
11934                 tg3_flag_set(tp, PROTECTED_NVRAM);
11935
11936         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11937         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11938         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11939                 tp->nvram_jedecnum = JEDEC_ATMEL;
11940                 tg3_flag_set(tp, NVRAM_BUFFERED);
11941                 break;
11942         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11943                 tp->nvram_jedecnum = JEDEC_ATMEL;
11944                 tg3_flag_set(tp, NVRAM_BUFFERED);
11945                 tg3_flag_set(tp, FLASH);
11946                 break;
11947         case FLASH_5752VENDOR_ST_M45PE10:
11948         case FLASH_5752VENDOR_ST_M45PE20:
11949         case FLASH_5752VENDOR_ST_M45PE40:
11950                 tp->nvram_jedecnum = JEDEC_ST;
11951                 tg3_flag_set(tp, NVRAM_BUFFERED);
11952                 tg3_flag_set(tp, FLASH);
11953                 break;
11954         }
11955
11956         if (tg3_flag(tp, FLASH)) {
11957                 tg3_nvram_get_pagesize(tp, nvcfg1);
11958         } else {
11959                 /* For eeprom, set pagesize to maximum eeprom size */
11960                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11961
11962                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11963                 tw32(NVRAM_CFG1, nvcfg1);
11964         }
11965 }
11966
11967 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11968 {
11969         u32 nvcfg1, protect = 0;
11970
11971         nvcfg1 = tr32(NVRAM_CFG1);
11972
11973         /* NVRAM protection for TPM */
11974         if (nvcfg1 & (1 << 27)) {
11975                 tg3_flag_set(tp, PROTECTED_NVRAM);
11976                 protect = 1;
11977         }
11978
11979         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11980         switch (nvcfg1) {
11981         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11982         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11983         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11984         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11985                 tp->nvram_jedecnum = JEDEC_ATMEL;
11986                 tg3_flag_set(tp, NVRAM_BUFFERED);
11987                 tg3_flag_set(tp, FLASH);
11988                 tp->nvram_pagesize = 264;
11989                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11990                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11991                         tp->nvram_size = (protect ? 0x3e200 :
11992                                           TG3_NVRAM_SIZE_512KB);
11993                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11994                         tp->nvram_size = (protect ? 0x1f200 :
11995                                           TG3_NVRAM_SIZE_256KB);
11996                 else
11997                         tp->nvram_size = (protect ? 0x1f200 :
11998                                           TG3_NVRAM_SIZE_128KB);
11999                 break;
12000         case FLASH_5752VENDOR_ST_M45PE10:
12001         case FLASH_5752VENDOR_ST_M45PE20:
12002         case FLASH_5752VENDOR_ST_M45PE40:
12003                 tp->nvram_jedecnum = JEDEC_ST;
12004                 tg3_flag_set(tp, NVRAM_BUFFERED);
12005                 tg3_flag_set(tp, FLASH);
12006                 tp->nvram_pagesize = 256;
12007                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12008                         tp->nvram_size = (protect ?
12009                                           TG3_NVRAM_SIZE_64KB :
12010                                           TG3_NVRAM_SIZE_128KB);
12011                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12012                         tp->nvram_size = (protect ?
12013                                           TG3_NVRAM_SIZE_64KB :
12014                                           TG3_NVRAM_SIZE_256KB);
12015                 else
12016                         tp->nvram_size = (protect ?
12017                                           TG3_NVRAM_SIZE_128KB :
12018                                           TG3_NVRAM_SIZE_512KB);
12019                 break;
12020         }
12021 }
12022
12023 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12024 {
12025         u32 nvcfg1;
12026
12027         nvcfg1 = tr32(NVRAM_CFG1);
12028
12029         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12030         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12031         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12032         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12033         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12034                 tp->nvram_jedecnum = JEDEC_ATMEL;
12035                 tg3_flag_set(tp, NVRAM_BUFFERED);
12036                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12037
12038                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12039                 tw32(NVRAM_CFG1, nvcfg1);
12040                 break;
12041         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12042         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12043         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12044         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12045                 tp->nvram_jedecnum = JEDEC_ATMEL;
12046                 tg3_flag_set(tp, NVRAM_BUFFERED);
12047                 tg3_flag_set(tp, FLASH);
12048                 tp->nvram_pagesize = 264;
12049                 break;
12050         case FLASH_5752VENDOR_ST_M45PE10:
12051         case FLASH_5752VENDOR_ST_M45PE20:
12052         case FLASH_5752VENDOR_ST_M45PE40:
12053                 tp->nvram_jedecnum = JEDEC_ST;
12054                 tg3_flag_set(tp, NVRAM_BUFFERED);
12055                 tg3_flag_set(tp, FLASH);
12056                 tp->nvram_pagesize = 256;
12057                 break;
12058         }
12059 }
12060
12061 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12062 {
12063         u32 nvcfg1, protect = 0;
12064
12065         nvcfg1 = tr32(NVRAM_CFG1);
12066
12067         /* NVRAM protection for TPM */
12068         if (nvcfg1 & (1 << 27)) {
12069                 tg3_flag_set(tp, PROTECTED_NVRAM);
12070                 protect = 1;
12071         }
12072
12073         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12074         switch (nvcfg1) {
12075         case FLASH_5761VENDOR_ATMEL_ADB021D:
12076         case FLASH_5761VENDOR_ATMEL_ADB041D:
12077         case FLASH_5761VENDOR_ATMEL_ADB081D:
12078         case FLASH_5761VENDOR_ATMEL_ADB161D:
12079         case FLASH_5761VENDOR_ATMEL_MDB021D:
12080         case FLASH_5761VENDOR_ATMEL_MDB041D:
12081         case FLASH_5761VENDOR_ATMEL_MDB081D:
12082         case FLASH_5761VENDOR_ATMEL_MDB161D:
12083                 tp->nvram_jedecnum = JEDEC_ATMEL;
12084                 tg3_flag_set(tp, NVRAM_BUFFERED);
12085                 tg3_flag_set(tp, FLASH);
12086                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12087                 tp->nvram_pagesize = 256;
12088                 break;
12089         case FLASH_5761VENDOR_ST_A_M45PE20:
12090         case FLASH_5761VENDOR_ST_A_M45PE40:
12091         case FLASH_5761VENDOR_ST_A_M45PE80:
12092         case FLASH_5761VENDOR_ST_A_M45PE16:
12093         case FLASH_5761VENDOR_ST_M_M45PE20:
12094         case FLASH_5761VENDOR_ST_M_M45PE40:
12095         case FLASH_5761VENDOR_ST_M_M45PE80:
12096         case FLASH_5761VENDOR_ST_M_M45PE16:
12097                 tp->nvram_jedecnum = JEDEC_ST;
12098                 tg3_flag_set(tp, NVRAM_BUFFERED);
12099                 tg3_flag_set(tp, FLASH);
12100                 tp->nvram_pagesize = 256;
12101                 break;
12102         }
12103
12104         if (protect) {
12105                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12106         } else {
12107                 switch (nvcfg1) {
12108                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12109                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12110                 case FLASH_5761VENDOR_ST_A_M45PE16:
12111                 case FLASH_5761VENDOR_ST_M_M45PE16:
12112                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12113                         break;
12114                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12115                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12116                 case FLASH_5761VENDOR_ST_A_M45PE80:
12117                 case FLASH_5761VENDOR_ST_M_M45PE80:
12118                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12119                         break;
12120                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12121                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12122                 case FLASH_5761VENDOR_ST_A_M45PE40:
12123                 case FLASH_5761VENDOR_ST_M_M45PE40:
12124                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12125                         break;
12126                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12127                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12128                 case FLASH_5761VENDOR_ST_A_M45PE20:
12129                 case FLASH_5761VENDOR_ST_M_M45PE20:
12130                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12131                         break;
12132                 }
12133         }
12134 }
12135
12136 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12137 {
12138         tp->nvram_jedecnum = JEDEC_ATMEL;
12139         tg3_flag_set(tp, NVRAM_BUFFERED);
12140         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12141 }
12142
12143 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12144 {
12145         u32 nvcfg1;
12146
12147         nvcfg1 = tr32(NVRAM_CFG1);
12148
12149         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12150         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12151         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12152                 tp->nvram_jedecnum = JEDEC_ATMEL;
12153                 tg3_flag_set(tp, NVRAM_BUFFERED);
12154                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12155
12156                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12157                 tw32(NVRAM_CFG1, nvcfg1);
12158                 return;
12159         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12160         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12161         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12162         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12163         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12164         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12165         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12166                 tp->nvram_jedecnum = JEDEC_ATMEL;
12167                 tg3_flag_set(tp, NVRAM_BUFFERED);
12168                 tg3_flag_set(tp, FLASH);
12169
12170                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12171                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12172                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12173                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12174                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12175                         break;
12176                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12177                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12178                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12179                         break;
12180                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12181                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12182                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12183                         break;
12184                 }
12185                 break;
12186         case FLASH_5752VENDOR_ST_M45PE10:
12187         case FLASH_5752VENDOR_ST_M45PE20:
12188         case FLASH_5752VENDOR_ST_M45PE40:
12189                 tp->nvram_jedecnum = JEDEC_ST;
12190                 tg3_flag_set(tp, NVRAM_BUFFERED);
12191                 tg3_flag_set(tp, FLASH);
12192
12193                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12194                 case FLASH_5752VENDOR_ST_M45PE10:
12195                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12196                         break;
12197                 case FLASH_5752VENDOR_ST_M45PE20:
12198                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12199                         break;
12200                 case FLASH_5752VENDOR_ST_M45PE40:
12201                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12202                         break;
12203                 }
12204                 break;
12205         default:
12206                 tg3_flag_set(tp, NO_NVRAM);
12207                 return;
12208         }
12209
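              /* Only the 264- and 528-byte DataFlash page sizes need
               * NVRAM address translation; everything else is addressed
               * linearly.
               */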
12210         tg3_nvram_get_pagesize(tp, nvcfg1);
12211         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12212                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12213 }
12214
12215
12216 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12217 {
12218         u32 nvcfg1;
12219
12220         nvcfg1 = tr32(NVRAM_CFG1);
12221
12222         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12223         case FLASH_5717VENDOR_ATMEL_EEPROM:
12224         case FLASH_5717VENDOR_MICRO_EEPROM:
12225                 tp->nvram_jedecnum = JEDEC_ATMEL;
12226                 tg3_flag_set(tp, NVRAM_BUFFERED);
12227                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12228
12229                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12230                 tw32(NVRAM_CFG1, nvcfg1);
12231                 return;
12232         case FLASH_5717VENDOR_ATMEL_MDB011D:
12233         case FLASH_5717VENDOR_ATMEL_ADB011B:
12234         case FLASH_5717VENDOR_ATMEL_ADB011D:
12235         case FLASH_5717VENDOR_ATMEL_MDB021D:
12236         case FLASH_5717VENDOR_ATMEL_ADB021B:
12237         case FLASH_5717VENDOR_ATMEL_ADB021D:
12238         case FLASH_5717VENDOR_ATMEL_45USPT:
12239                 tp->nvram_jedecnum = JEDEC_ATMEL;
12240                 tg3_flag_set(tp, NVRAM_BUFFERED);
12241                 tg3_flag_set(tp, FLASH);
12242
12243                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12244                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12245                         /* Detect size with tg3_get_nvram_size() */
12246                         break;
12247                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12248                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12249                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12250                         break;
12251                 default:
12252                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12253                         break;
12254                 }
12255                 break;
12256         case FLASH_5717VENDOR_ST_M_M25PE10:
12257         case FLASH_5717VENDOR_ST_A_M25PE10:
12258         case FLASH_5717VENDOR_ST_M_M45PE10:
12259         case FLASH_5717VENDOR_ST_A_M45PE10:
12260         case FLASH_5717VENDOR_ST_M_M25PE20:
12261         case FLASH_5717VENDOR_ST_A_M25PE20:
12262         case FLASH_5717VENDOR_ST_M_M45PE20:
12263         case FLASH_5717VENDOR_ST_A_M45PE20:
12264         case FLASH_5717VENDOR_ST_25USPT:
12265         case FLASH_5717VENDOR_ST_45USPT:
12266                 tp->nvram_jedecnum = JEDEC_ST;
12267                 tg3_flag_set(tp, NVRAM_BUFFERED);
12268                 tg3_flag_set(tp, FLASH);
12269
12270                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12271                 case FLASH_5717VENDOR_ST_M_M25PE20:
12272                 case FLASH_5717VENDOR_ST_M_M45PE20:
12273                         /* Detect size with tg3_get_nvram_size() */
12274                         break;
12275                 case FLASH_5717VENDOR_ST_A_M25PE20:
12276                 case FLASH_5717VENDOR_ST_A_M45PE20:
12277                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12278                         break;
12279                 default:
12280                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12281                         break;
12282                 }
12283                 break;
12284         default:
12285                 tg3_flag_set(tp, NO_NVRAM);
12286                 return;
12287         }
12288
12289         tg3_nvram_get_pagesize(tp, nvcfg1);
12290         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12291                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12292 }
12293
12294 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12295 {
12296         u32 nvcfg1, nvmpinstrp;
12297
12298         nvcfg1 = tr32(NVRAM_CFG1);
12299         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12300
12301         switch (nvmpinstrp) {
12302         case FLASH_5720_EEPROM_HD:
12303         case FLASH_5720_EEPROM_LD:
12304                 tp->nvram_jedecnum = JEDEC_ATMEL;
12305                 tg3_flag_set(tp, NVRAM_BUFFERED);
12306
12307                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12308                 tw32(NVRAM_CFG1, nvcfg1);
12309                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12310                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12311                 else
12312                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12313                 return;
12314         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12315         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12316         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12317         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12318         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12319         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12320         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12321         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12322         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12323         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12324         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12325         case FLASH_5720VENDOR_ATMEL_45USPT:
12326                 tp->nvram_jedecnum = JEDEC_ATMEL;
12327                 tg3_flag_set(tp, NVRAM_BUFFERED);
12328                 tg3_flag_set(tp, FLASH);
12329
12330                 switch (nvmpinstrp) {
12331                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12332                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12333                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12334                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12335                         break;
12336                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12337                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12338                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12339                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12340                         break;
12341                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12342                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12343                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12344                         break;
12345                 default:
12346                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12347                         break;
12348                 }
12349                 break;
12350         case FLASH_5720VENDOR_M_ST_M25PE10:
12351         case FLASH_5720VENDOR_M_ST_M45PE10:
12352         case FLASH_5720VENDOR_A_ST_M25PE10:
12353         case FLASH_5720VENDOR_A_ST_M45PE10:
12354         case FLASH_5720VENDOR_M_ST_M25PE20:
12355         case FLASH_5720VENDOR_M_ST_M45PE20:
12356         case FLASH_5720VENDOR_A_ST_M25PE20:
12357         case FLASH_5720VENDOR_A_ST_M45PE20:
12358         case FLASH_5720VENDOR_M_ST_M25PE40:
12359         case FLASH_5720VENDOR_M_ST_M45PE40:
12360         case FLASH_5720VENDOR_A_ST_M25PE40:
12361         case FLASH_5720VENDOR_A_ST_M45PE40:
12362         case FLASH_5720VENDOR_M_ST_M25PE80:
12363         case FLASH_5720VENDOR_M_ST_M45PE80:
12364         case FLASH_5720VENDOR_A_ST_M25PE80:
12365         case FLASH_5720VENDOR_A_ST_M45PE80:
12366         case FLASH_5720VENDOR_ST_25USPT:
12367         case FLASH_5720VENDOR_ST_45USPT:
12368                 tp->nvram_jedecnum = JEDEC_ST;
12369                 tg3_flag_set(tp, NVRAM_BUFFERED);
12370                 tg3_flag_set(tp, FLASH);
12371
12372                 switch (nvmpinstrp) {
12373                 case FLASH_5720VENDOR_M_ST_M25PE20:
12374                 case FLASH_5720VENDOR_M_ST_M45PE20:
12375                 case FLASH_5720VENDOR_A_ST_M25PE20:
12376                 case FLASH_5720VENDOR_A_ST_M45PE20:
12377                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12378                         break;
12379                 case FLASH_5720VENDOR_M_ST_M25PE40:
12380                 case FLASH_5720VENDOR_M_ST_M45PE40:
12381                 case FLASH_5720VENDOR_A_ST_M25PE40:
12382                 case FLASH_5720VENDOR_A_ST_M45PE40:
12383                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12384                         break;
12385                 case FLASH_5720VENDOR_M_ST_M25PE80:
12386                 case FLASH_5720VENDOR_M_ST_M45PE80:
12387                 case FLASH_5720VENDOR_A_ST_M25PE80:
12388                 case FLASH_5720VENDOR_A_ST_M45PE80:
12389                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12390                         break;
12391                 default:
12392                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12393                         break;
12394                 }
12395                 break;
12396         default:
12397                 tg3_flag_set(tp, NO_NVRAM);
12398                 return;
12399         }
12400
12401         tg3_nvram_get_pagesize(tp, nvcfg1);
12402         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12403                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12404 }
12405
12406 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be in
		 * the opposite of the native endian format.  We accomplish
		 * this by reversing all the operations that would have been
		 * performed on the data by a call to tg3_nvram_read_be32().
		 */
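		/*
		 * Concretely: buffer bytes 12 34 56 78 reach the
		 * GRC_EEPROM_DATA register as the value 0x78563412,
		 * regardless of host endianness.
		 */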
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

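	/*
	 * Unbuffered flash parts must be rewritten a full page at a
	 * time: read the page back, merge the caller's bytes into it,
	 * issue a write enable plus page erase, then stream the merged
	 * page out word by word with FIRST/LAST framing.
	 */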
	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

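	/*
	 * Buffered parts accept direct word writes; NVRAM_CMD_FIRST and
	 * NVRAM_CMD_LAST simply bracket each flash page (and the final
	 * word of the transfer) so the controller can manage its
	 * internal write buffer.
	 */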
	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
						 NVRAM_CMD_GO | NVRAM_CMD_DONE);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
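/*
 * Top-level NVRAM write: momentarily drop the GPIO-driven write
 * protect if the board has one, route the request to the EEPROM,
 * buffered-flash or unbuffered-flash writer, then restore protection.
 */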
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

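/*
 * Map a board's PCI subsystem vendor/device pair to the PHY it ships
 * with; used when neither the hardware nor the EEPROM yields a usable
 * PHY ID.
 */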
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard, WOL-capable device by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

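	/*
	 * Everything else comes from the legacy NIC SRAM config block,
	 * which is only trusted when the signature word matches.
	 */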
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

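			/*
			 * Repack the stored ID into the driver's PHY ID
			 * layout; this mirrors the OUI/model/revision
			 * packing tg3_phy_probe() builds from the
			 * MII_PHYSID1/MII_PHYSID2 registers.
			 */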
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
			 * as happens with some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* SerDes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

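	/*
	 * The 32-bit gphy config spans the two OTP words just read: its
	 * upper half is the low half of the first word and its lower
	 * half is the high half of the second, hence the shift-and-merge.
	 */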
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg |
		  ADVERTISED_Pause;

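	/* Start from autoneg + pause and widen the mask: gigabit modes
	 * unless the PHY is 10/100-only, and the full 10/100 copper set
	 * unless this is a SerDes link, which advertises FIBRE instead.
	 */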
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}

static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the PHY ID already found in the eeprom area
		 * or, failing that, the hard-coded subsystem-ID table.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	int j, i = 0;

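	/*
	 * Walk the VPD read-only section: an optional vendor-specific
	 * keyword pair (MFR_ID "1028" plus VENDOR0) can carry a firmware
	 * version string, and the PARTNO keyword carries the board part
	 * number.
	 */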
	vpd_data = (u8 *)tg3_vpd_readblock(tp);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > TG3_NVM_VPD_LEN)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > TG3_NVM_VPD_LEN)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

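	/*
	 * The bootcode version comes in one of two formats: newer images
	 * embed a 16-byte version string at an offset stored in the
	 * image header, while older ones keep a packed major/minor word
	 * in the NVRAM directory.
	 */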
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

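	/*
	 * Compose the final version string: any VPD-derived version
	 * first, then the bootcode/selfboot version keyed off the NVRAM
	 * signature word, then the DASH/NCSI or management firmware
	 * version when APE/ASF is active.
	 */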
	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (vpd_vers)
		goto done;

	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
13601         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13602         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13603         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13604
13605         /* Important! -- Make sure register accesses are byteswapped
13606          * correctly.  Also, for those chips that require it, make
13607          * sure that indirect register accesses are enabled before
13608          * the first operation.
13609          */
13610         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13611                               &misc_ctrl_reg);
13612         tp->misc_host_ctrl |= (misc_ctrl_reg &
13613                                MISC_HOST_CTRL_CHIPREV);
13614         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13615                                tp->misc_host_ctrl);
13616
13617         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13618                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13619         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13620                 u32 prod_id_asic_rev;
13621
13622                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13623                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13624                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13625                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13626                         pci_read_config_dword(tp->pdev,
13627                                               TG3PCI_GEN2_PRODID_ASICREV,
13628                                               &prod_id_asic_rev);
13629                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13630                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13631                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13632                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13633                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13634                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13635                         pci_read_config_dword(tp->pdev,
13636                                               TG3PCI_GEN15_PRODID_ASICREV,
13637                                               &prod_id_asic_rev);
13638                 else
13639                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13640                                               &prod_id_asic_rev);
13641
13642                 tp->pci_chip_rev_id = prod_id_asic_rev;
13643         }
13644
13645         /* Wrong chip ID in 5752 A0. This code can be removed later
13646          * as A0 is not in production.
13647          */
13648         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13649                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13650
13651         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13652          * we need to disable memory and use config. cycles
13653          * only to access all registers. The 5702/03 chips
13654          * can mistakenly decode the special cycles from the
13655          * ICH chipsets as memory write cycles, causing corruption
13656          * of register and memory space. Only certain ICH bridges
13657          * will drive special cycles with non-zero data during the
13658          * address phase which can fall within the 5703's address
13659          * range. This is not an ICH bug as the PCI spec allows
13660          * non-zero address during special cycles. However, only
13661          * these ICH bridges are known to drive non-zero addresses
13662          * during special cycles.
13663          *
13664          * Since special cycles do not cross PCI bridges, we only
13665          * enable this workaround if the 5703 is on the secondary
13666          * bus of these ICH bridges.
13667          */
13668         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13669             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13670                 static struct tg3_dev_id {
13671                         u32     vendor;
13672                         u32     device;
13673                         u32     rev;
13674                 } ich_chipsets[] = {
13675                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13676                           PCI_ANY_ID },
13677                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13678                           PCI_ANY_ID },
13679                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13680                           0xa },
13681                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13682                           PCI_ANY_ID },
13683                         { },
13684                 };
13685                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13686                 struct pci_dev *bridge = NULL;
13687
13688                 while (pci_id->vendor != 0) {
13689                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13690                                                 bridge);
13691                         if (!bridge) {
13692                                 pci_id++;
13693                                 continue;
13694                         }
13695                         if (pci_id->rev != PCI_ANY_ID) {
13696                                 if (bridge->revision > pci_id->rev)
13697                                         continue;
13698                         }
13699                         if (bridge->subordinate &&
13700                             (bridge->subordinate->number ==
13701                              tp->pdev->bus->number)) {
13702                                 tg3_flag_set(tp, ICH_WORKAROUND);
13703                                 pci_dev_put(bridge);
13704                                 break;
13705                         }
13706                 }
13707         }
13708
13709         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13710                 static struct tg3_dev_id {
13711                         u32     vendor;
13712                         u32     device;
13713                 } bridge_chipsets[] = {
13714                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13715                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13716                         { },
13717                 };
13718                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13719                 struct pci_dev *bridge = NULL;
13720
13721                 while (pci_id->vendor != 0) {
13722                         bridge = pci_get_device(pci_id->vendor,
13723                                                 pci_id->device,
13724                                                 bridge);
13725                         if (!bridge) {
13726                                 pci_id++;
13727                                 continue;
13728                         }
13729                         if (bridge->subordinate &&
13730                             (bridge->subordinate->number <=
13731                              tp->pdev->bus->number) &&
13732                             (bridge->subordinate->subordinate >=
13733                              tp->pdev->bus->number)) {
13734                                 tg3_flag_set(tp, 5701_DMA_BUG);
13735                                 pci_dev_put(bridge);
13736                                 break;
13737                         }
13738                 }
13739         }
13740
13741         /* The EPB bridge inside the 5714, 5715, and 5780 cannot support
13742          * DMA addresses > 40-bit.  The bridge may have additional 57xx
13743          * devices behind it, for example in some 4-port NIC designs.
13744          * Any tg3 device found behind the bridge will also need the
13745          * 40-bit DMA workaround.
13746          */
13747         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13748             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13749                 tg3_flag_set(tp, 5780_CLASS);
13750                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13751                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13752         } else {
13753                 struct pci_dev *bridge = NULL;
13754
13755                 do {
13756                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13757                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13758                                                 bridge);
13759                         if (bridge && bridge->subordinate &&
13760                             (bridge->subordinate->number <=
13761                              tp->pdev->bus->number) &&
13762                             (bridge->subordinate->subordinate >=
13763                              tp->pdev->bus->number)) {
13764                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13765                                 pci_dev_put(bridge);
13766                                 break;
13767                         }
13768                 } while (bridge);
13769         }
13770
13771         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13772             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13773                 tp->pdev_peer = tg3_find_peer(tp);
13774
13775         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13776             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13777             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13778                 tg3_flag_set(tp, 5717_PLUS);
13779
13780         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13781             tg3_flag(tp, 5717_PLUS))
13782                 tg3_flag_set(tp, 57765_PLUS);
13783
13784         /* Intentionally exclude ASIC_REV_5906 */
13785         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13786             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13787             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13788             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13789             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13790             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13791             tg3_flag(tp, 57765_PLUS))
13792                 tg3_flag_set(tp, 5755_PLUS);
13793
13794         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13795             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13796             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13797             tg3_flag(tp, 5755_PLUS) ||
13798             tg3_flag(tp, 5780_CLASS))
13799                 tg3_flag_set(tp, 5750_PLUS);
13800
13801         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13802             tg3_flag(tp, 5750_PLUS))
13803                 tg3_flag_set(tp, 5705_PLUS);
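        /* At this point the family flags form a strict hierarchy:
         * 57765_PLUS implies 5755_PLUS, which implies 5750_PLUS, which
         * implies 5705_PLUS.  Later code tests the widest flag that
         * covers the hardware it cares about.
         */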
13804
13805         /* Determine TSO capabilities */
13806         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13807                 ; /* Do nothing. HW bug. */
13808         else if (tg3_flag(tp, 57765_PLUS))
13809                 tg3_flag_set(tp, HW_TSO_3);
13810         else if (tg3_flag(tp, 5755_PLUS) ||
13811                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13812                 tg3_flag_set(tp, HW_TSO_2);
13813         else if (tg3_flag(tp, 5750_PLUS)) {
13814                 tg3_flag_set(tp, HW_TSO_1);
13815                 tg3_flag_set(tp, TSO_BUG);
13816                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13817                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13818                         tg3_flag_clear(tp, TSO_BUG);
13819         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13820                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13821                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13822                 tg3_flag_set(tp, TSO_BUG);
13823                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13824                         tp->fw_needed = FIRMWARE_TG3TSO5;
13825                 else
13826                         tp->fw_needed = FIRMWARE_TG3TSO;
13827         }
13828
13829         /* Selectively allow TSO based on operating conditions */
13830         if (tg3_flag(tp, HW_TSO_1) ||
13831             tg3_flag(tp, HW_TSO_2) ||
13832             tg3_flag(tp, HW_TSO_3) ||
13833             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13834                 tg3_flag_set(tp, TSO_CAPABLE);
13835         else {
13836                 tg3_flag_clear(tp, TSO_CAPABLE);
13837                 tg3_flag_clear(tp, TSO_BUG);
13838                 tp->fw_needed = NULL;
13839         }
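        /* Chips without one of the HW_TSO_* engines can still offer TSO
         * by loading TSO firmware (tp->fw_needed), but only when ASF
         * management firmware is not active, as the check above enforces.
         */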
13840
13841         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13842                 tp->fw_needed = FIRMWARE_TG3;
13843
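        /* Default to a single interrupt vector; raised below only for
         * MSI-X capable (57765_PLUS) hardware.
         */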
13844         tp->irq_max = 1;
13845
13846         if (tg3_flag(tp, 5750_PLUS)) {
13847                 tg3_flag_set(tp, SUPPORT_MSI);
13848                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13849                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13850                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13851                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13852                      tp->pdev_peer == tp->pdev))
13853                         tg3_flag_clear(tp, SUPPORT_MSI);
13854
13855                 if (tg3_flag(tp, 5755_PLUS) ||
13856                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13857                         tg3_flag_set(tp, 1SHOT_MSI);
13858                 }
13859
13860                 if (tg3_flag(tp, 57765_PLUS)) {
13861                         tg3_flag_set(tp, SUPPORT_MSIX);
13862                         tp->irq_max = TG3_IRQ_MAX_VECS;
13863                 }
13864         }
13865
13866         if (tg3_flag(tp, 5755_PLUS))
13867                 tg3_flag_set(tp, SHORT_DMA_BUG);
13868
13869         if (tg3_flag(tp, 5717_PLUS))
13870                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13871
13872         if (tg3_flag(tp, 57765_PLUS) &&
13873             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13874                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13875
13876         if (!tg3_flag(tp, 5705_PLUS) ||
13877             tg3_flag(tp, 5780_CLASS) ||
13878             tg3_flag(tp, USE_JUMBO_BDFLAG))
13879                 tg3_flag_set(tp, JUMBO_CAPABLE);
13880
13881         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13882                               &pci_state_reg);
13883
13884         if (pci_is_pcie(tp->pdev)) {
13885                 u16 lnkctl;
13886
13887                 tg3_flag_set(tp, PCI_EXPRESS);
13888
13889                 tp->pcie_readrq = 4096;
13890                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13891                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13892                         tp->pcie_readrq = 2048;
13893
13894                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13895
13896                 pci_read_config_word(tp->pdev,
13897                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13898                                      &lnkctl);
13899                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13900                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13901                             ASIC_REV_5906) {
13902                                 tg3_flag_clear(tp, HW_TSO_2);
13903                                 tg3_flag_clear(tp, TSO_CAPABLE);
13904                         }
13905                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13906                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13907                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13908                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13909                                 tg3_flag_set(tp, CLKREQ_BUG);
13910                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13911                         tg3_flag_set(tp, L1PLLPD_EN);
13912                 }
13913         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13914                 /* BCM5785 devices are effectively PCIe devices, and should
13915                  * follow PCIe codepaths, but do not have a PCIe capabilities
13916                  * section.
13917           */
13918                 tg3_flag_set(tp, PCI_EXPRESS);
13919         } else if (!tg3_flag(tp, 5705_PLUS) ||
13920                    tg3_flag(tp, 5780_CLASS)) {
13921                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13922                 if (!tp->pcix_cap) {
13923                         dev_err(&tp->pdev->dev,
13924                                 "Cannot find PCI-X capability, aborting\n");
13925                         return -EIO;
13926                 }
13927
13928                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13929                         tg3_flag_set(tp, PCIX_MODE);
13930         }
13931
13932         /* If we have an AMD 762 or VIA K8T800 chipset, write
13933          * reordering of mailbox register writes by the host
13934          * controller can cause major trouble.  We read back after
13935          * every mailbox register write to force the writes to be
13936          * posted to the chip in order.
13937          */
13938         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13939             !tg3_flag(tp, PCI_EXPRESS))
13940                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13941
13942         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13943                              &tp->pci_cacheline_sz);
13944         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13945                              &tp->pci_lat_timer);
13946         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13947             tp->pci_lat_timer < 64) {
13948                 tp->pci_lat_timer = 64;
13949                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13950                                       tp->pci_lat_timer);
13951         }
13952
13953         /* Important: the PCI-X hw workaround situation must be decided
13954          * before the first MMIO register access.
13955          */
13956         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13957                 /* 5700 BX chips need to have their TX producer index
13958                  * mailboxes written twice to work around a bug.
13959                  */
13960                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13961
13962                 /* If we are in PCI-X mode, enable register write workaround.
13963                  *
13964                  * The workaround is to use indirect register accesses
13965                  * for all chip writes not to mailbox registers.
13966                  */
13967                 if (tg3_flag(tp, PCIX_MODE)) {
13968                         u32 pm_reg;
13969
13970                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13971
13972                         /* The chip can have its power management PCI config
13973                          * space registers clobbered due to this bug.
13974                          * So explicitly force the chip into D0 here.
13975                          */
13976                         pci_read_config_dword(tp->pdev,
13977                                               tp->pm_cap + PCI_PM_CTRL,
13978                                               &pm_reg);
13979                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13980                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13981                         pci_write_config_dword(tp->pdev,
13982                                                tp->pm_cap + PCI_PM_CTRL,
13983                                                pm_reg);
13984
13985                         /* Also, force SERR#/PERR# in PCI command. */
13986                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13987                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13988                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13989                 }
13990         }
13991
13992         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13993                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13994         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13995                 tg3_flag_set(tp, PCI_32BIT);
13996
13997         /* Chip-specific fixup from Broadcom driver */
13998         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13999             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14000                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14001                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14002         }
14003
14004         /* Default fast path register access methods */
14005         tp->read32 = tg3_read32;
14006         tp->write32 = tg3_write32;
14007         tp->read32_mbox = tg3_read32;
14008         tp->write32_mbox = tg3_write32;
14009         tp->write32_tx_mbox = tg3_write32;
14010         tp->write32_rx_mbox = tg3_write32;
14011
14012         /* Various workaround register access methods */
14013         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14014                 tp->write32 = tg3_write_indirect_reg32;
14015         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14016                  (tg3_flag(tp, PCI_EXPRESS) &&
14017                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14018                 /*
14019                  * Back to back register writes can cause problems on these
14020                  * chips, the workaround is to read back all reg writes
14021                  * except those to mailbox regs.
14022                  *
14023                  * See tg3_write_indirect_reg32().
14024                  */
14025                 tp->write32 = tg3_write_flush_reg32;
14026         }
14027
14028         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14029                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14030                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14031                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14032         }
14033
14034         if (tg3_flag(tp, ICH_WORKAROUND)) {
14035                 tp->read32 = tg3_read_indirect_reg32;
14036                 tp->write32 = tg3_write_indirect_reg32;
14037                 tp->read32_mbox = tg3_read_indirect_mbox;
14038                 tp->write32_mbox = tg3_write_indirect_mbox;
14039                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14040                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14041
14042                 iounmap(tp->regs);
14043                 tp->regs = NULL;
14044
14045                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14046                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14047                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14048         }
14049         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14050                 tp->read32_mbox = tg3_read32_mbox_5906;
14051                 tp->write32_mbox = tg3_write32_mbox_5906;
14052                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14053                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14054         }
14055
14056         if (tp->write32 == tg3_write_indirect_reg32 ||
14057             (tg3_flag(tp, PCIX_MODE) &&
14058              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14059               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14060                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14061
14062         /* The memory arbiter has to be enabled in order for SRAM accesses
14063          * to succeed.  Normally on powerup the tg3 chip firmware will make
14064          * sure it is enabled, but other entities such as system netboot
14065          * code might disable it.
14066          */
14067         val = tr32(MEMARB_MODE);
14068         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14069
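        /* Determine which of the chip's PCI functions this device is:
         * PCI-X parts report it in the PCI-X status register, all
         * others derive it from the PCI function number in devfn.
         */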
14070         if (tg3_flag(tp, PCIX_MODE)) {
14071                 pci_read_config_dword(tp->pdev,
14072                                       tp->pcix_cap + PCI_X_STATUS, &val);
14073                 tp->pci_fn = val & 0x7;
14074         } else {
14075                 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14076         }
14077
14078         /* Get eeprom hw config before calling tg3_set_power_state().
14079          * In particular, the TG3_FLAG_IS_NIC flag must be
14080          * determined before calling tg3_set_power_state() so that
14081          * we know whether or not to switch out of Vaux power.
14082          * When the flag is set, it means that GPIO1 is used for eeprom
14083          * write protect and also implies that it is a LOM where GPIOs
14084          * are not used to switch power.
14085          */
14086         tg3_get_eeprom_hw_cfg(tp);
14087
14088         if (tg3_flag(tp, ENABLE_APE)) {
14089                 /* Allow reads and writes to the
14090                  * APE register and memory space.
14091                  */
14092                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14093                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14094                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14095                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14096                                        pci_state_reg);
14097
14098                 tg3_ape_lock_init(tp);
14099         }
14100
14101         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14102             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14103             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14104             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14105             tg3_flag(tp, 57765_PLUS))
14106                 tg3_flag_set(tp, CPMU_PRESENT);
14107
14108         /* Set up tp->grc_local_ctrl before calling
14109          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14110          * will bring 5700's external PHY out of reset.
14111          * It is also used as eeprom write protect on LOMs.
14112          */
14113         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14114         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14115             tg3_flag(tp, EEPROM_WRITE_PROT))
14116                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14117                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14118         /* Unused GPIO3 must be driven as output on 5752 because there
14119          * are no pull-up resistors on unused GPIO pins.
14120          */
14121         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14122                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14123
14124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14125             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14126             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14127                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14128
14129         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14130             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14131                 /* Turn off the debug UART. */
14132                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14133                 if (tg3_flag(tp, IS_NIC))
14134                         /* Keep VMain power. */
14135                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14136                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14137         }
14138
14139         /* Switch out of Vaux if it is a NIC */
14140         tg3_pwrsrc_switch_to_vmain(tp);
14141
14142         /* Derive initial jumbo mode from MTU assigned in
14143          * ether_setup() via the alloc_etherdev() call
14144          */
14145         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14146                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14147
14148         /* Determine the Wake-on-LAN speed to use. */
14149         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14150             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14151             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14152             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14153                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14154         } else {
14155                 tg3_flag_set(tp, WOL_SPEED_100MB);
14156         }
14157
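        /* The 5906's internal PHY is a 10/100 FET; this also feeds the
         * TG3_PHYFLG_10_100_ONLY logic further down.
         */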
14158         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14159                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14160
14161         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14162         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14163             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14164              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14165              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14166             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14167             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14168                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14169
14170         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14171             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14172                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14173         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14174                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14175
14176         if (tg3_flag(tp, 5705_PLUS) &&
14177             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14178             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14179             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14180             !tg3_flag(tp, 57765_PLUS)) {
14181                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14182                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14183                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14184                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14185                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14186                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14187                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14188                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14189                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14190                 } else
14191                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14192         }
14193
14194         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14195             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14196                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14197                 if (tp->phy_otp == 0)
14198                         tp->phy_otp = TG3_OTP_DEFAULT;
14199         }
14200
14201         if (tg3_flag(tp, CPMU_PRESENT))
14202                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14203         else
14204                 tp->mi_mode = MAC_MI_MODE_BASE;
14205
14206         tp->coalesce_mode = 0;
14207         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14208             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14209                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14210
14211         /* Set these bits to enable the statistics workaround. */
14212         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14213             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14214             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14215                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14216                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14217         }
14218
14219         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14220             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14221                 tg3_flag_set(tp, USE_PHYLIB);
14222
14223         err = tg3_mdio_init(tp);
14224         if (err)
14225                 return err;
14226
14227         /* Initialize data/descriptor byte/word swapping. */
14228         val = tr32(GRC_MODE);
14229         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14230                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14231                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14232                         GRC_MODE_B2HRX_ENABLE |
14233                         GRC_MODE_HTX2B_ENABLE |
14234                         GRC_MODE_HOST_STACKUP);
14235         else
14236                 val &= GRC_MODE_HOST_STACKUP;
14237
14238         tw32(GRC_MODE, val | tp->grc_mode);
14239
14240         tg3_switch_clocks(tp);
14241
14242         /* Clear this out for sanity. */
14243         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14244
14245         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14246                               &pci_state_reg);
14247         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14248             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14249                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14250
14251                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14252                     chiprevid == CHIPREV_ID_5701_B0 ||
14253                     chiprevid == CHIPREV_ID_5701_B2 ||
14254                     chiprevid == CHIPREV_ID_5701_B5) {
14255                         void __iomem *sram_base;
14256
14257                         /* Write some dummy words into the SRAM status block
14258                          * area and see if it reads back correctly.  If the return
14259                          * value is bad, force enable the PCIX workaround.
14260                          */
14261                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14262
14263                         writel(0x00000000, sram_base);
14264                         writel(0x00000000, sram_base + 4);
14265                         writel(0xffffffff, sram_base + 4);
14266                         if (readl(sram_base) != 0x00000000)
14267                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14268                 }
14269         }
14270
14271         udelay(50);
14272         tg3_nvram_init(tp);
14273
14274         grc_misc_cfg = tr32(GRC_MISC_CFG);
14275         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14276
14277         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14278             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14279              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14280                 tg3_flag_set(tp, IS_5788);
14281
14282         if (!tg3_flag(tp, IS_5788) &&
14283             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14284                 tg3_flag_set(tp, TAGGED_STATUS);
14285         if (tg3_flag(tp, TAGGED_STATUS)) {
14286                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14287                                       HOSTCC_MODE_CLRTICK_TXBD);
14288
14289                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14290                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14291                                        tp->misc_host_ctrl);
14292         }
14293
14294         /* Preserve the APE MAC_MODE bits */
14295         if (tg3_flag(tp, ENABLE_APE))
14296                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14297         else
14298                 tp->mac_mode = TG3_DEF_MAC_MODE;
14299
14300         /* These devices are limited to 10/100 operation only */
14301         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14302              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14303             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14304              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14305              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14306               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14307               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14308             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14309              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14310               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14311               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14312             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14313             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14314             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14315             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14316                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14317
14318         err = tg3_phy_probe(tp);
14319         if (err) {
14320                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14321                 /* ... but do not return immediately ... */
14322                 tg3_mdio_fini(tp);
14323         }
14324
14325         tg3_read_vpd(tp);
14326         tg3_read_fw_ver(tp);
14327
14328         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14329                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14330         } else {
14331                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14332                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14333                 else
14334                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14335         }
14336
14337         /* 5700 {AX,BX} chips have a broken status block link
14338          * change bit implementation, so we must use the
14339          * status register in those cases.
14340          */
14341         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14342                 tg3_flag_set(tp, USE_LINKCHG_REG);
14343         else
14344                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14345
14346         /* The led_ctrl is set during tg3_phy_probe; here we might
14347          * have to force the link status polling mechanism based
14348          * upon subsystem IDs.
14349          */
14350         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14351             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14352             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14353                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14354                 tg3_flag_set(tp, USE_LINKCHG_REG);
14355         }
14356
14357         /* For all SERDES we poll the MAC status register. */
14358         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14359                 tg3_flag_set(tp, POLL_SERDES);
14360         else
14361                 tg3_flag_clear(tp, POLL_SERDES);
14362
14363         tp->rx_offset = NET_IP_ALIGN;
14364         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14365         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14366             tg3_flag(tp, PCIX_MODE)) {
14367                 tp->rx_offset = 0;
14368 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
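                /* Without NET_IP_ALIGN padding every received frame
                 * would have misaligned headers, so force the driver to
                 * copy all packets into aligned buffers.
                 */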
14369                 tp->rx_copy_thresh = ~(u16)0;
14370 #endif
14371         }
14372
14373         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14374         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14375         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14376
14377         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14378
14379         /* Increment the rx prod index on the rx std ring by at most
14380          * 8 for these chips to work around hw errata.
14381          */
14382         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14383             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14384             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14385                 tp->rx_std_max_post = 8;
14386
14387         if (tg3_flag(tp, ASPM_WORKAROUND))
14388                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14389                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14390
14391         return err;
14392 }
14393
14394 #ifdef CONFIG_SPARC
14395 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14396 {
14397         struct net_device *dev = tp->dev;
14398         struct pci_dev *pdev = tp->pdev;
14399         struct device_node *dp = pci_device_to_OF_node(pdev);
14400         const unsigned char *addr;
14401         int len;
14402
14403         addr = of_get_property(dp, "local-mac-address", &len);
14404         if (addr && len == 6) {
14405                 memcpy(dev->dev_addr, addr, 6);
14406                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14407                 return 0;
14408         }
14409         return -ENODEV;
14410 }
14411
14412 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14413 {
14414         struct net_device *dev = tp->dev;
14415
14416         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14417         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14418         return 0;
14419 }
14420 #endif
14421
14422 static int __devinit tg3_get_device_address(struct tg3 *tp)
14423 {
14424         struct net_device *dev = tp->dev;
14425         u32 hi, lo, mac_offset;
14426         int addr_ok = 0;
14427
14428 #ifdef CONFIG_SPARC
14429         if (!tg3_get_macaddr_sparc(tp))
14430                 return 0;
14431 #endif
14432
14433         mac_offset = 0x7c;
14434         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14435             tg3_flag(tp, 5780_CLASS)) {
14436                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14437                         mac_offset = 0xcc;
14438                 if (tg3_nvram_lock(tp))
14439                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14440                 else
14441                         tg3_nvram_unlock(tp);
14442         } else if (tg3_flag(tp, 5717_PLUS)) {
14443                 if (tp->pci_fn & 1)
14444                         mac_offset = 0xcc;
14445                 if (tp->pci_fn > 1)
14446                         mac_offset += 0x18c;
14447         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14448                 mac_offset = 0x10;
14449
14450         /* First try to get it from MAC address mailbox. */
14451         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
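        /* Bootcode stores the address with a 0x484b (ASCII "HK")
         * signature in the upper 16 bits of the high word.
         */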
14452         if ((hi >> 16) == 0x484b) {
14453                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14454                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14455
14456                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14457                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14458                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14459                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14460                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14461
14462                 /* Some old bootcode may report a 0 MAC address in SRAM */
14463                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14464         }
14465         if (!addr_ok) {
14466                 /* Next, try NVRAM. */
14467                 if (!tg3_flag(tp, NO_NVRAM) &&
14468                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14469                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14470                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14471                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14472                 }
14473                 /* Finally just fetch it out of the MAC control regs. */
14474                 else {
14475                         hi = tr32(MAC_ADDR_0_HIGH);
14476                         lo = tr32(MAC_ADDR_0_LOW);
14477
14478                         dev->dev_addr[5] = lo & 0xff;
14479                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14480                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14481                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14482                         dev->dev_addr[1] = hi & 0xff;
14483                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14484                 }
14485         }
14486
14487         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14488 #ifdef CONFIG_SPARC
14489                 if (!tg3_get_default_macaddr_sparc(tp))
14490                         return 0;
14491 #endif
14492                 return -EINVAL;
14493         }
14494         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14495         return 0;
14496 }
14497
14498 #define BOUNDARY_SINGLE_CACHELINE       1
14499 #define BOUNDARY_MULTI_CACHELINE        2
14500
14501 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14502 {
14503         int cacheline_size;
14504         u8 byte;
14505         int goal;
14506
14507         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14508         if (byte == 0)
14509                 cacheline_size = 1024;
14510         else
14511                 cacheline_size = (int) byte * 4;
14512
14513         /* On 5703 and later chips, the boundary bits have no
14514          * effect.
14515          */
14516         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14517             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14518             !tg3_flag(tp, PCI_EXPRESS))
14519                 goto out;
14520
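        /* Pick how aggressively DMA bursts may cross cacheline
         * boundaries: multi-cacheline bursts on PPC64/IA64/PARISC,
         * single-cacheline on SPARC64/Alpha, no constraint elsewhere.
         */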
14521 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14522         goal = BOUNDARY_MULTI_CACHELINE;
14523 #else
14524 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14525         goal = BOUNDARY_SINGLE_CACHELINE;
14526 #else
14527         goal = 0;
14528 #endif
14529 #endif
14530
14531         if (tg3_flag(tp, 57765_PLUS)) {
14532                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14533                 goto out;
14534         }
14535
14536         if (!goal)
14537                 goto out;
14538
14539         /* PCI controllers on most RISC systems tend to disconnect
14540          * when a device tries to burst across a cache-line boundary.
14541          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14542          *
14543          * Unfortunately, for PCI-E there are only limited
14544          * write-side controls for this, and thus for reads
14545          * we will still get the disconnects.  We'll also waste
14546          * these PCI cycles for both read and write for chips
14547          * other than 5700 and 5701 which do not implement the
14548          * boundary bits.
14549          */
14550         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14551                 switch (cacheline_size) {
14552                 case 16:
14553                 case 32:
14554                 case 64:
14555                 case 128:
14556                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14557                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14558                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14559                         } else {
14560                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14561                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14562                         }
14563                         break;
14564
14565                 case 256:
14566                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14567                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14568                         break;
14569
14570                 default:
14571                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14572                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14573                         break;
14574                 }
14575         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14576                 switch (cacheline_size) {
14577                 case 16:
14578                 case 32:
14579                 case 64:
14580                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14581                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14582                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14583                                 break;
14584                         }
14585                         /* fallthrough */
14586                 case 128:
14587                 default:
14588                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14589                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14590                         break;
14591                 }
14592         } else {
14593                 switch (cacheline_size) {
14594                 case 16:
14595                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14596                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14597                                         DMA_RWCTRL_WRITE_BNDRY_16);
14598                                 break;
14599                         }
14600                         /* fallthrough */
14601                 case 32:
14602                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14603                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14604                                         DMA_RWCTRL_WRITE_BNDRY_32);
14605                                 break;
14606                         }
14607                         /* fallthrough */
14608                 case 64:
14609                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14610                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14611                                         DMA_RWCTRL_WRITE_BNDRY_64);
14612                                 break;
14613                         }
14614                         /* fallthrough */
14615                 case 128:
14616                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14617                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14618                                         DMA_RWCTRL_WRITE_BNDRY_128);
14619                                 break;
14620                         }
14621                         /* fallthrough */
14622                 case 256:
14623                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14624                                 DMA_RWCTRL_WRITE_BNDRY_256);
14625                         break;
14626                 case 512:
14627                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14628                                 DMA_RWCTRL_WRITE_BNDRY_512);
14629                         break;
14630                 case 1024:
14631                 default:
14632                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14633                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14634                         break;
14635                 }
14636         }
14637
14638 out:
14639         return val;
14640 }
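#if 0
        /* Illustrative only (never built): how tg3_test_dma() below
         * seeds DMA_RW_CTRL from the boundary calculation.
         */
        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
#endif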
14641
14642 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14643 {
14644         struct tg3_internal_buffer_desc test_desc;
14645         u32 sram_dma_descs;
14646         int i, ret;
14647
14648         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14649
14650         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14651         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14652         tw32(RDMAC_STATUS, 0);
14653         tw32(WDMAC_STATUS, 0);
14654
14655         tw32(BUFMGR_MODE, 0);
14656         tw32(FTQ_RESET, 0);
14657
14658         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14659         test_desc.addr_lo = buf_dma & 0xffffffff;
14660         test_desc.nic_mbuf = 0x00002100;
14661         test_desc.len = size;
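        /* The descriptor points the DMA engine at the host buffer; data
         * is staged through NIC SRAM at 0x2100 (see the disabled
         * verification loop below).
         */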
14662
14663         /*
14664          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14665          * the *second* time the tg3 driver was getting loaded after an
14666          * initial scan.
14667          *
14668          * Broadcom tells me:
14669          *   ...the DMA engine is connected to the GRC block and a DMA
14670          *   reset may affect the GRC block in some unpredictable way...
14671          *   The behavior of resets to individual blocks has not been tested.
14672          *
14673          * Broadcom noted the GRC reset will also reset all sub-components.
14674          */
14675         if (to_device) {
14676                 test_desc.cqid_sqid = (13 << 8) | 2;
14677
14678                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14679                 udelay(40);
14680         } else {
14681                 test_desc.cqid_sqid = (16 << 8) | 7;
14682
14683                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14684                 udelay(40);
14685         }
14686         test_desc.flags = 0x00000005;
14687
14688         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14689                 u32 val;
14690
14691                 val = *(((u32 *)&test_desc) + i);
14692                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14693                                        sram_dma_descs + (i * sizeof(u32)));
14694                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14695         }
14696         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14697
14698         if (to_device)
14699                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14700         else
14701                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14702
14703         ret = -ENODEV;
14704         for (i = 0; i < 40; i++) {
14705                 u32 val;
14706
14707                 if (to_device)
14708                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14709                 else
14710                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14711                 if ((val & 0xffff) == sram_dma_descs) {
14712                         ret = 0;
14713                         break;
14714                 }
14715
14716                 udelay(100);
14717         }
14718
14719         return ret;
14720 }
14721
14722 #define TEST_BUFFER_SIZE        0x2000
14723
14724 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14725         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14726         { },
14727 };
14728
14729 static int __devinit tg3_test_dma(struct tg3 *tp)
14730 {
14731         dma_addr_t buf_dma;
14732         u32 *buf, saved_dma_rwctrl;
14733         int ret = 0;
14734
14735         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14736                                  &buf_dma, GFP_KERNEL);
14737         if (!buf) {
14738                 ret = -ENOMEM;
14739                 goto out_nofree;
14740         }
14741
14742         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14743                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14744
14745         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14746
14747         if (tg3_flag(tp, 57765_PLUS))
14748                 goto out;
14749
14750         if (tg3_flag(tp, PCI_EXPRESS)) {
14751                 /* DMA read watermark not used on PCIE */
14752                 tp->dma_rwctrl |= 0x00180000;
14753         } else if (!tg3_flag(tp, PCIX_MODE)) {
14754                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14755                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14756                         tp->dma_rwctrl |= 0x003f0000;
14757                 else
14758                         tp->dma_rwctrl |= 0x003f000f;
14759         } else {
14760                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14761                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14762                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14763                         u32 read_water = 0x7;
14764
14765                         /* If the 5704 is behind the EPB bridge, we can
14766                          * do the less restrictive ONE_DMA workaround for
14767                          * better performance.
14768                          */
14769                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14770                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14771                                 tp->dma_rwctrl |= 0x8000;
14772                         else if (ccval == 0x6 || ccval == 0x7)
14773                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14774
14775                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14776                                 read_water = 4;
14777                         /* Set bit 23 to enable PCIX hw bug fix */
14778                         tp->dma_rwctrl |=
14779                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14780                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14781                                 (1 << 23);
14782                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14783                         /* 5780 always in PCIX mode */
14784                         tp->dma_rwctrl |= 0x00144000;
14785                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14786                         /* 5714 always in PCIX mode */
14787                         tp->dma_rwctrl |= 0x00148000;
14788                 } else {
14789                         tp->dma_rwctrl |= 0x001b000f;
14790                 }
14791         }
14792
14793         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14794             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14795                 tp->dma_rwctrl &= 0xfffffff0;
14796
14797         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14798             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14799                 /* Remove this if it causes problems for some boards. */
14800                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14801
14802                 /* On 5700/5701 chips, we need to set this bit.
14803                  * Otherwise the chip will issue cacheline transactions
14804                  * to streamable DMA memory without all the byte
14805                  * enables turned on.  This is an error on several
14806                  * RISC PCI controllers, in particular sparc64.
14807                  *
14808                  * On 5703/5704 chips, this bit has been reassigned
14809                  * a different meaning.  In particular, it is used
14810                  * on those chips to enable a PCI-X workaround.
14811                  */
14812                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14813         }
14814
14815         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14816
14817 #if 0
14818         /* Unneeded, already done by tg3_get_invariants.  */
14819         tg3_switch_clocks(tp);
14820 #endif
14821
14822         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14823             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14824                 goto out;
14825
14826         /* It is best to perform the DMA test with the maximum write
14827          * burst size in order to expose the 5700/5701 write DMA bug.
14828          */
14829         saved_dma_rwctrl = tp->dma_rwctrl;
14830         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14831         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14832
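        /* Round-trip a counting pattern through the chip.  On a readback
         * mismatch, retry once with the write boundary tightened to 16
         * bytes before declaring the hardware broken.
         */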
14833         while (1) {
14834                 u32 *p = buf, i;
14835
14836                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14837                         p[i] = i;
14838
14839                 /* Send the buffer to the chip. */
14840                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14841                 if (ret) {
14842                         dev_err(&tp->pdev->dev,
14843                                 "%s: Buffer write failed. err = %d\n",
14844                                 __func__, ret);
14845                         break;
14846                 }
14847
14848 #if 0
14849                 /* validate data reached card RAM correctly. */
14850                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14851                         u32 val;
14852                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14853                         if (le32_to_cpu(val) != p[i]) {
14854                                 dev_err(&tp->pdev->dev,
14855                                         "%s: Buffer corrupted on device! "
14856                                         "(%d != %d)\n", __func__, val, i);
14857                                 /* ret = -ENODEV here? */
14858                         }
14859                         p[i] = 0;
14860                 }
14861 #endif
14862                 /* Now read it back. */
14863                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14864                 if (ret) {
14865                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14866                                 "err = %d\n", __func__, ret);
14867                         break;
14868                 }
14869
14870                 /* Verify it. */
14871                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14872                         if (p[i] == i)
14873                                 continue;
14874
14875                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14876                             DMA_RWCTRL_WRITE_BNDRY_16) {
14877                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14878                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14879                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14880                                 break;
14881                         } else {
14882                                 dev_err(&tp->pdev->dev,
14883                                         "%s: Buffer corrupted on read back! "
14884                                         "(%d != %d)\n", __func__, p[i], i);
14885                                 ret = -ENODEV;
14886                                 goto out;
14887                         }
14888                 }
14889
14890                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14891                         /* Success. */
14892                         ret = 0;
14893                         break;
14894                 }
14895         }
14896         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14897             DMA_RWCTRL_WRITE_BNDRY_16) {
14898                 /* DMA test passed without adjusting the DMA boundary;
14899                  * now look for chipsets that are known to expose the
14900                  * DMA bug without failing the test.
14901                  */
14902                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14903                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14904                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14905                 } else {
14906                         /* Safe to use the calculated DMA boundary. */
14907                         tp->dma_rwctrl = saved_dma_rwctrl;
14908                 }
14909
14910                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14911         }
14912
14913 out:
14914         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14915 out_nofree:
14916         return ret;
14917 }
14918
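/* Select buffer-manager MBUF watermarks appropriate to the chip
 * generation; the _jumbo variants cover the jumbo-frame configuration.
 */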
14919 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14920 {
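        /* Roughly: the read-DMA and MAC-RX low-water marks are the MBUF
         * pool levels at which the chip starts throttling (e.g. by
         * asserting flow control), and the high-water mark is where
         * normal operation resumes.  The defaults below are the
         * per-ASIC-family values from tg3.h.
         */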
14921         if (tg3_flag(tp, 57765_PLUS)) {
14922                 tp->bufmgr_config.mbuf_read_dma_low_water =
14923                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14924                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14925                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14926                 tp->bufmgr_config.mbuf_high_water =
14927                         DEFAULT_MB_HIGH_WATER_57765;
14928
14929                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14930                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14931                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14932                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14933                 tp->bufmgr_config.mbuf_high_water_jumbo =
14934                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14935         } else if (tg3_flag(tp, 5705_PLUS)) {
14936                 tp->bufmgr_config.mbuf_read_dma_low_water =
14937                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14938                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14939                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14940                 tp->bufmgr_config.mbuf_high_water =
14941                         DEFAULT_MB_HIGH_WATER_5705;
14942                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14943                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14944                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14945                         tp->bufmgr_config.mbuf_high_water =
14946                                 DEFAULT_MB_HIGH_WATER_5906;
14947                 }
14948
14949                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14950                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14951                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14952                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14953                 tp->bufmgr_config.mbuf_high_water_jumbo =
14954                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14955         } else {
14956                 tp->bufmgr_config.mbuf_read_dma_low_water =
14957                         DEFAULT_MB_RDMA_LOW_WATER;
14958                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14959                         DEFAULT_MB_MACRX_LOW_WATER;
14960                 tp->bufmgr_config.mbuf_high_water =
14961                         DEFAULT_MB_HIGH_WATER;
14962
14963                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14964                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14965                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14966                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14967                 tp->bufmgr_config.mbuf_high_water_jumbo =
14968                         DEFAULT_MB_HIGH_WATER_JUMBO;
14969         }
14970
14971         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14972         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14973 }
14974
14975 static char * __devinit tg3_phy_string(struct tg3 *tp)
14976 {
14977         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14978         case TG3_PHY_ID_BCM5400:        return "5400";
14979         case TG3_PHY_ID_BCM5401:        return "5401";
14980         case TG3_PHY_ID_BCM5411:        return "5411";
14981         case TG3_PHY_ID_BCM5701:        return "5701";
14982         case TG3_PHY_ID_BCM5703:        return "5703";
14983         case TG3_PHY_ID_BCM5704:        return "5704";
14984         case TG3_PHY_ID_BCM5705:        return "5705";
14985         case TG3_PHY_ID_BCM5750:        return "5750";
14986         case TG3_PHY_ID_BCM5752:        return "5752";
14987         case TG3_PHY_ID_BCM5714:        return "5714";
14988         case TG3_PHY_ID_BCM5780:        return "5780";
14989         case TG3_PHY_ID_BCM5755:        return "5755";
14990         case TG3_PHY_ID_BCM5787:        return "5787";
14991         case TG3_PHY_ID_BCM5784:        return "5784";
14992         case TG3_PHY_ID_BCM5756:        return "5722/5756";
14993         case TG3_PHY_ID_BCM5906:        return "5906";
14994         case TG3_PHY_ID_BCM5761:        return "5761";
14995         case TG3_PHY_ID_BCM5718C:       return "5718C";
14996         case TG3_PHY_ID_BCM5718S:       return "5718S";
14997         case TG3_PHY_ID_BCM57765:       return "57765";
14998         case TG3_PHY_ID_BCM5719C:       return "5719C";
14999         case TG3_PHY_ID_BCM5720C:       return "5720C";
15000         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15001         case 0:                 return "serdes";
15002         default:                return "unknown";
15003         }
15004 }
15005
15006 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15007 {
15008         if (tg3_flag(tp, PCI_EXPRESS)) {
15009                 strcpy(str, "PCI Express");
15010                 return str;
15011         } else if (tg3_flag(tp, PCIX_MODE)) {
15012                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15013
15014                 strcpy(str, "PCIX:");
15015
15016                 if ((clock_ctrl == 7) ||
15017                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15018                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15019                         strcat(str, "133MHz");
15020                 else if (clock_ctrl == 0)
15021                         strcat(str, "33MHz");
15022                 else if (clock_ctrl == 2)
15023                         strcat(str, "50MHz");
15024                 else if (clock_ctrl == 4)
15025                         strcat(str, "66MHz");
15026                 else if (clock_ctrl == 6)
15027                         strcat(str, "100MHz");
15028         } else {
15029                 strcpy(str, "PCI:");
15030                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15031                         strcat(str, "66MHz");
15032                 else
15033                         strcat(str, "33MHz");
15034         }
15035         if (tg3_flag(tp, PCI_32BIT))
15036                 strcat(str, ":32-bit");
15037         else
15038                 strcat(str, ":64-bit");
15039         return str;
15040 }
15041
15042 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15043 {
15044         struct pci_dev *peer;
15045         unsigned int func, devnr = tp->pdev->devfn & ~7;
15046
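        /* PCI devfn packs the device number in bits 7:3 and the function
         * number in bits 2:0, so devnr is function 0 of our own slot and
         * devnr | func walks all eight possible functions of it.
         */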
15047         for (func = 0; func < 8; func++) {
15048                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15049                 if (peer && peer != tp->pdev)
15050                         break;
15051                 pci_dev_put(peer);
15052         }
15053         /* The 5704 can be configured in single-port mode; set peer to
15054          * tp->pdev in that case.
15055          */
15056         if (!peer) {
15057                 peer = tp->pdev;
15058                 return peer;
15059         }
15060
15061         /*
15062          * We don't need to keep the refcount elevated; there's no way
15063          * to remove one half of this device without removing the other.
15064          */
15065         pci_dev_put(peer);
15066
15067         return peer;
15068 }
15069
15070 static void __devinit tg3_init_coal(struct tg3 *tp)
15071 {
15072         struct ethtool_coalesce *ec = &tp->coal;
15073
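        /* These defaults are what userspace sees (e.g. via
         * "ethtool -c ethX", which ends up copying tp->coal back out)
         * until someone retunes them with "ethtool -C".
         */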
15074         memset(ec, 0, sizeof(*ec));
15075         ec->cmd = ETHTOOL_GCOALESCE;
15076         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15077         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15078         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15079         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15080         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15081         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15082         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15083         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15084         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15085
15086         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15087                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15088                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15089                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15090                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15091                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15092         }
15093
15094         if (tg3_flag(tp, 5705_PLUS)) {
15095                 ec->rx_coalesce_usecs_irq = 0;
15096                 ec->tx_coalesce_usecs_irq = 0;
15097                 ec->stats_block_coalesce_usecs = 0;
15098         }
15099 }
15100
15101 static const struct net_device_ops tg3_netdev_ops = {
15102         .ndo_open               = tg3_open,
15103         .ndo_stop               = tg3_close,
15104         .ndo_start_xmit         = tg3_start_xmit,
15105         .ndo_get_stats64        = tg3_get_stats64,
15106         .ndo_validate_addr      = eth_validate_addr,
15107         .ndo_set_multicast_list = tg3_set_rx_mode,
15108         .ndo_set_mac_address    = tg3_set_mac_addr,
15109         .ndo_do_ioctl           = tg3_ioctl,
15110         .ndo_tx_timeout         = tg3_tx_timeout,
15111         .ndo_change_mtu         = tg3_change_mtu,
15112         .ndo_fix_features       = tg3_fix_features,
15113         .ndo_set_features       = tg3_set_features,
15114 #ifdef CONFIG_NET_POLL_CONTROLLER
15115         .ndo_poll_controller    = tg3_poll_controller,
15116 #endif
15117 };
15118
15119 static int __devinit tg3_init_one(struct pci_dev *pdev,
15120                                   const struct pci_device_id *ent)
15121 {
15122         struct net_device *dev;
15123         struct tg3 *tp;
15124         int i, err, pm_cap;
15125         u32 sndmbx, rcvmbx, intmbx;
15126         char str[40];
15127         u64 dma_mask, persist_dma_mask;
15128         u32 features = 0;
15129
15130         printk_once(KERN_INFO "%s\n", version);
15131
15132         err = pci_enable_device(pdev);
15133         if (err) {
15134                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15135                 return err;
15136         }
15137
15138         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15139         if (err) {
15140                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15141                 goto err_out_disable_pdev;
15142         }
15143
15144         pci_set_master(pdev);
15145
15146         /* Find power-management capability. */
15147         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15148         if (pm_cap == 0) {
15149                 dev_err(&pdev->dev,
15150                         "Cannot find Power Management capability, aborting\n");
15151                 err = -EIO;
15152                 goto err_out_free_res;
15153         }
15154
15155         err = pci_set_power_state(pdev, PCI_D0);
15156         if (err) {
15157                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15158                 goto err_out_free_res;
15159         }
15160
15161         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15162         if (!dev) {
15163                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15164                 err = -ENOMEM;
15165                 goto err_out_power_down;
15166         }
15167
15168         SET_NETDEV_DEV(dev, &pdev->dev);
15169
15170         tp = netdev_priv(dev);
15171         tp->pdev = pdev;
15172         tp->dev = dev;
15173         tp->pm_cap = pm_cap;
15174         tp->rx_mode = TG3_DEF_RX_MODE;
15175         tp->tx_mode = TG3_DEF_TX_MODE;
15176
15177         if (tg3_debug > 0)
15178                 tp->msg_enable = tg3_debug;
15179         else
15180                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15181
15182         /* The word/byte swap controls here govern register access byte
15183          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15184          * setting below.
15185          */
15186         tp->misc_host_ctrl =
15187                 MISC_HOST_CTRL_MASK_PCI_INT |
15188                 MISC_HOST_CTRL_WORD_SWAP |
15189                 MISC_HOST_CTRL_INDIR_ACCESS |
15190                 MISC_HOST_CTRL_PCISTATE_RW;
15191
15192         /* The NONFRM (non-frame) byte/word swap controls take effect
15193          * on descriptor entries, i.e. anything which isn't packet data.
15194          *
15195          * The StrongARM chips on the board (one for tx, one for rx)
15196          * are running in big-endian mode.
15197          */
15198         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15199                         GRC_MODE_WSWAP_NONFRM_DATA);
15200 #ifdef __BIG_ENDIAN
15201         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15202 #endif
15203         spin_lock_init(&tp->lock);
15204         spin_lock_init(&tp->indirect_lock);
15205         INIT_WORK(&tp->reset_task, tg3_reset_task);
15206
15207         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15208         if (!tp->regs) {
15209                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15210                 err = -ENOMEM;
15211                 goto err_out_free_dev;
15212         }
15213
15214         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15215             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15216             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15217             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15218             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15219             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15220             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15221             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15222                 tg3_flag_set(tp, ENABLE_APE);
15223                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15224                 if (!tp->aperegs) {
15225                         dev_err(&pdev->dev,
15226                                 "Cannot map APE registers, aborting\n");
15227                         err = -ENOMEM;
15228                         goto err_out_iounmap;
15229                 }
15230         }
15231
15232         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15233         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15234
15235         dev->ethtool_ops = &tg3_ethtool_ops;
15236         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15237         dev->netdev_ops = &tg3_netdev_ops;
15238         dev->irq = pdev->irq;
15239
15240         err = tg3_get_invariants(tp);
15241         if (err) {
15242                 dev_err(&pdev->dev,
15243                         "Problem fetching invariants of chip, aborting\n");
15244                 goto err_out_apeunmap;
15245         }
15246
15247         /* The EPB bridge inside 5714, 5715, and 5780 and any
15248          * device behind the EPB cannot support DMA addresses > 40-bit.
15249          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15250          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15251          * do DMA address check in tg3_start_xmit().
15252          */
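        /* This works out to three cases: 32-bit masks only (IS_5788),
         * a 40-bit persistent mask with an optional 64-bit streaming mask
         * when CONFIG_HIGHMEM is set (40BIT_DMA_BUG parts), or full
         * 64-bit masks everywhere else.
         */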
15253         if (tg3_flag(tp, IS_5788))
15254                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15255         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15256                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15257 #ifdef CONFIG_HIGHMEM
15258                 dma_mask = DMA_BIT_MASK(64);
15259 #endif
15260         } else
15261                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15262
15263         /* Configure DMA attributes. */
15264         if (dma_mask > DMA_BIT_MASK(32)) {
15265                 err = pci_set_dma_mask(pdev, dma_mask);
15266                 if (!err) {
15267                         features |= NETIF_F_HIGHDMA;
15268                         err = pci_set_consistent_dma_mask(pdev,
15269                                                           persist_dma_mask);
15270                         if (err < 0) {
15271                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15272                                         "DMA for consistent allocations\n");
15273                                 goto err_out_apeunmap;
15274                         }
15275                 }
15276         }
15277         if (err || dma_mask == DMA_BIT_MASK(32)) {
15278                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15279                 if (err) {
15280                         dev_err(&pdev->dev,
15281                                 "No usable DMA configuration, aborting\n");
15282                         goto err_out_apeunmap;
15283                 }
15284         }
15285
15286         tg3_init_bufmgr_config(tp);
15287
15288         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15289
15290         /* 5700 B0 chips do not support checksumming correctly due
15291          * to hardware bugs.
15292          */
15293         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15294                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15295
15296                 if (tg3_flag(tp, 5755_PLUS))
15297                         features |= NETIF_F_IPV6_CSUM;
15298         }
15299
15300         /* TSO is on by default on chips that support hardware TSO.
15301          * Firmware TSO on older chips gives lower performance, so it
15302          * is off by default, but can be enabled using ethtool.
15303          */
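        /* E.g. "ethtool -K ethX tso on" should be able to turn firmware
         * TSO on for those older chips once the driver is loaded.
         */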
15304         if ((tg3_flag(tp, HW_TSO_1) ||
15305              tg3_flag(tp, HW_TSO_2) ||
15306              tg3_flag(tp, HW_TSO_3)) &&
15307             (features & NETIF_F_IP_CSUM))
15308                 features |= NETIF_F_TSO;
15309         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15310                 if (features & NETIF_F_IPV6_CSUM)
15311                         features |= NETIF_F_TSO6;
15312                 if (tg3_flag(tp, HW_TSO_3) ||
15313                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15314                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15315                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15316                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15317                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15318                         features |= NETIF_F_TSO_ECN;
15319         }
15320
15321         dev->features |= features;
15322         dev->vlan_features |= features;
15323
15324         /*
15325          * Add loopback capability only for a subset of devices that support
15326          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15327          * loopback for the remaining devices.
15328          */
15329         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15330             !tg3_flag(tp, CPMU_PRESENT))
15331                 /* Add the loopback capability */
15332                 features |= NETIF_F_LOOPBACK;
15333
15334         dev->hw_features |= features;
15335
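        /* Cap the default rx ring on slow-bus 5705 A1 parts without TSO;
         * the MAX_RXPEND_64 flag presumably reflects a 64-entry limit in
         * this configuration, hence 63 pending descriptors.
         */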
15336         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15337             !tg3_flag(tp, TSO_CAPABLE) &&
15338             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15339                 tg3_flag_set(tp, MAX_RXPEND_64);
15340                 tp->rx_pending = 63;
15341         }
15342
15343         err = tg3_get_device_address(tp);
15344         if (err) {
15345                 dev_err(&pdev->dev,
15346                         "Could not obtain valid ethernet address, aborting\n");
15347                 goto err_out_apeunmap;
15348         }
15349
15350         /*
15351          * Reset the chip in case the UNDI or EFI driver did not shut it
15352          * down; otherwise the DMA self test will enable WDMAC and we'll
15353          * see (spurious) pending DMA on the PCI bus at that point.
15354          */
15355         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15356             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15357                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15358                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15359         }
15360
15361         err = tg3_test_dma(tp);
15362         if (err) {
15363                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15364                 goto err_out_apeunmap;
15365         }
15366
15367         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15368         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15369         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15370         for (i = 0; i < tp->irq_max; i++) {
15371                 struct tg3_napi *tnapi = &tp->napi[i];
15372
15373                 tnapi->tp = tp;
15374                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15375
15376                 tnapi->int_mbox = intmbx;
15377                 if (i < 4)
15378                         intmbx += 0x8;
15379                 else
15380                         intmbx += 0x4;
15381
15382                 tnapi->consmbox = rcvmbx;
15383                 tnapi->prodmbox = sndmbx;
15384
15385                 if (i)
15386                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15387                 else
15388                         tnapi->coal_now = HOSTCC_MODE_NOW;
15389
15390                 if (!tg3_flag(tp, SUPPORT_MSIX))
15391                         break;
15392
15393                 /*
15394                  * If we support MSIX, we'll be using RSS.  If we're using
15395                  * RSS, the first vector only handles link interrupts and the
15396                  * remaining vectors handle rx and tx interrupts.  Reuse the
15397          * mailbox values for the next iteration.  The values we set up
15398                  * above are still useful for the single vectored mode.
15399                  */
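                /* Concretely: the receive mailboxes advance 8 bytes per
                 * extra vector, while the +0xc/-0x4 toggle below nets
                 * +8 bytes per two vectors for the send mailboxes,
                 * alternating between the two 4-byte halves of each
                 * 64-bit mailbox register.
                 */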
15400                 if (!i)
15401                         continue;
15402
15403                 rcvmbx += 0x8;
15404
15405                 if (sndmbx & 0x4)
15406                         sndmbx -= 0x4;
15407                 else
15408                         sndmbx += 0xc;
15409         }
15410
15411         tg3_init_coal(tp);
15412
15413         pci_set_drvdata(pdev, dev);
15414
15415         if (tg3_flag(tp, 5717_PLUS)) {
15416                 /* Resume from a low-power state */
15417                 tg3_frob_aux_power(tp, false);
15418         }
15419
15420         err = register_netdev(dev);
15421         if (err) {
15422                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15423                 goto err_out_apeunmap;
15424         }
15425
15426         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15427                     tp->board_part_number,
15428                     tp->pci_chip_rev_id,
15429                     tg3_bus_string(tp, str),
15430                     dev->dev_addr);
15431
15432         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15433                 struct phy_device *phydev;
15434                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15435                 netdev_info(dev,
15436                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15437                             phydev->drv->name, dev_name(&phydev->dev));
15438         } else {
15439                 char *ethtype;
15440
15441                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15442                         ethtype = "10/100Base-TX";
15443                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15444                         ethtype = "1000Base-SX";
15445                 else
15446                         ethtype = "10/100/1000Base-T";
15447
15448                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15449                             "(WireSpeed[%d], EEE[%d])\n",
15450                             tg3_phy_string(tp), ethtype,
15451                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15452                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15453         }
15454
15455         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15456                     (dev->features & NETIF_F_RXCSUM) != 0,
15457                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15458                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15459                     tg3_flag(tp, ENABLE_ASF) != 0,
15460                     tg3_flag(tp, TSO_CAPABLE) != 0);
15461         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15462                     tp->dma_rwctrl,
15463                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15464                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15465
15466         pci_save_state(pdev);
15467
15468         return 0;
15469
15470 err_out_apeunmap:
15471         if (tp->aperegs) {
15472                 iounmap(tp->aperegs);
15473                 tp->aperegs = NULL;
15474         }
15475
15476 err_out_iounmap:
15477         if (tp->regs) {
15478                 iounmap(tp->regs);
15479                 tp->regs = NULL;
15480         }
15481
15482 err_out_free_dev:
15483         free_netdev(dev);
15484
15485 err_out_power_down:
15486         pci_set_power_state(pdev, PCI_D3hot);
15487
15488 err_out_free_res:
15489         pci_release_regions(pdev);
15490
15491 err_out_disable_pdev:
15492         pci_disable_device(pdev);
15493         pci_set_drvdata(pdev, NULL);
15494         return err;
15495 }
15496
15497 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15498 {
15499         struct net_device *dev = pci_get_drvdata(pdev);
15500
15501         if (dev) {
15502                 struct tg3 *tp = netdev_priv(dev);
15503
15504                 if (tp->fw)
15505                         release_firmware(tp->fw);
15506
15507                 cancel_work_sync(&tp->reset_task);
15508
15509                 if (!tg3_flag(tp, USE_PHYLIB)) {
15510                         tg3_phy_fini(tp);
15511                         tg3_mdio_fini(tp);
15512                 }
15513
15514                 unregister_netdev(dev);
15515                 if (tp->aperegs) {
15516                         iounmap(tp->aperegs);
15517                         tp->aperegs = NULL;
15518                 }
15519                 if (tp->regs) {
15520                         iounmap(tp->regs);
15521                         tp->regs = NULL;
15522                 }
15523                 free_netdev(dev);
15524                 pci_release_regions(pdev);
15525                 pci_disable_device(pdev);
15526                 pci_set_drvdata(pdev, NULL);
15527         }
15528 }
15529
15530 #ifdef CONFIG_PM_SLEEP
15531 static int tg3_suspend(struct device *device)
15532 {
15533         struct pci_dev *pdev = to_pci_dev(device);
15534         struct net_device *dev = pci_get_drvdata(pdev);
15535         struct tg3 *tp = netdev_priv(dev);
15536         int err;
15537
15538         if (!netif_running(dev))
15539                 return 0;
15540
15541         flush_work_sync(&tp->reset_task);
15542         tg3_phy_stop(tp);
15543         tg3_netif_stop(tp);
15544
15545         del_timer_sync(&tp->timer);
15546
15547         tg3_full_lock(tp, 1);
15548         tg3_disable_ints(tp);
15549         tg3_full_unlock(tp);
15550
15551         netif_device_detach(dev);
15552
15553         tg3_full_lock(tp, 0);
15554         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15555         tg3_flag_clear(tp, INIT_COMPLETE);
15556         tg3_full_unlock(tp);
15557
15558         err = tg3_power_down_prepare(tp);
15559         if (err) {
15560                 int err2;
15561
15562                 tg3_full_lock(tp, 0);
15563
15564                 tg3_flag_set(tp, INIT_COMPLETE);
15565                 err2 = tg3_restart_hw(tp, 1);
15566                 if (err2)
15567                         goto out;
15568
15569                 tp->timer.expires = jiffies + tp->timer_offset;
15570                 add_timer(&tp->timer);
15571
15572                 netif_device_attach(dev);
15573                 tg3_netif_start(tp);
15574
15575 out:
15576                 tg3_full_unlock(tp);
15577
15578                 if (!err2)
15579                         tg3_phy_start(tp);
15580         }
15581
15582         return err;
15583 }
15584
15585 static int tg3_resume(struct device *device)
15586 {
15587         struct pci_dev *pdev = to_pci_dev(device);
15588         struct net_device *dev = pci_get_drvdata(pdev);
15589         struct tg3 *tp = netdev_priv(dev);
15590         int err;
15591
15592         if (!netif_running(dev))
15593                 return 0;
15594
15595         netif_device_attach(dev);
15596
15597         tg3_full_lock(tp, 0);
15598
15599         tg3_flag_set(tp, INIT_COMPLETE);
15600         err = tg3_restart_hw(tp, 1);
15601         if (err)
15602                 goto out;
15603
15604         tp->timer.expires = jiffies + tp->timer_offset;
15605         add_timer(&tp->timer);
15606
15607         tg3_netif_start(tp);
15608
15609 out:
15610         tg3_full_unlock(tp);
15611
15612         if (!err)
15613                 tg3_phy_start(tp);
15614
15615         return err;
15616 }
15617
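/* SIMPLE_DEV_PM_OPS points all of the system sleep callbacks
 * (suspend/resume, freeze/thaw, poweroff/restore) at this same pair of
 * routines.
 */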
15618 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15619 #define TG3_PM_OPS (&tg3_pm_ops)
15620
15621 #else
15622
15623 #define TG3_PM_OPS NULL
15624
15625 #endif /* CONFIG_PM_SLEEP */
15626
15627 /**
15628  * tg3_io_error_detected - called when PCI error is detected
15629  * @pdev: Pointer to PCI device
15630  * @state: The current PCI connection state
15631  *
15632  * This function is called after a PCI bus error affecting
15633  * this device has been detected.
15634  */
15635 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15636                                               pci_channel_state_t state)
15637 {
15638         struct net_device *netdev = pci_get_drvdata(pdev);
15639         struct tg3 *tp = netdev_priv(netdev);
15640         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15641
15642         netdev_info(netdev, "PCI I/O error detected\n");
15643
15644         rtnl_lock();
15645
15646         if (!netif_running(netdev))
15647                 goto done;
15648
15649         tg3_phy_stop(tp);
15650
15651         tg3_netif_stop(tp);
15652
15653         del_timer_sync(&tp->timer);
15654         tg3_flag_clear(tp, RESTART_TIMER);
15655
15656         /* Want to make sure that the reset task doesn't run */
15657         cancel_work_sync(&tp->reset_task);
15658         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
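        /* RESTART_TIMER is cleared a second time here, presumably in case
         * the reset task managed to set it again before being cancelled
         * above.
         */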
15659         tg3_flag_clear(tp, RESTART_TIMER);
15660
15661         netif_device_detach(netdev);
15662
15663         /* Clean up software state, even if MMIO is blocked */
15664         tg3_full_lock(tp, 0);
15665         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15666         tg3_full_unlock(tp);
15667
15668 done:
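        /* On permanent failure the device is no longer reachable, so
         * pci_disable_device() is skipped and the remove path finishes
         * the teardown.
         */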
15669         if (state == pci_channel_io_perm_failure)
15670                 err = PCI_ERS_RESULT_DISCONNECT;
15671         else
15672                 pci_disable_device(pdev);
15673
15674         rtnl_unlock();
15675
15676         return err;
15677 }
15678
15679 /**
15680  * tg3_io_slot_reset - called after the PCI bus has been reset.
15681  * @pdev: Pointer to PCI device
15682  *
15683  * Restart the card from scratch, as if from a cold-boot.
15684  * At this point, the card has experienced a hard reset,
15685  * followed by fixups by BIOS, and has its config space
15686  * set up identically to what it was at cold boot.
15687  */
15688 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15689 {
15690         struct net_device *netdev = pci_get_drvdata(pdev);
15691         struct tg3 *tp = netdev_priv(netdev);
15692         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15693         int err;
15694
15695         rtnl_lock();
15696
15697         if (pci_enable_device(pdev)) {
15698                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15699                 goto done;
15700         }
15701
15702         pci_set_master(pdev);
15703         pci_restore_state(pdev);
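        /* pci_restore_state() consumes the saved state, so save it again
         * in case another recovery cycle is needed later.
         */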
15704         pci_save_state(pdev);
15705
15706         if (!netif_running(netdev)) {
15707                 rc = PCI_ERS_RESULT_RECOVERED;
15708                 goto done;
15709         }
15710
15711         err = tg3_power_up(tp);
15712         if (err)
15713                 goto done;
15714
15715         rc = PCI_ERS_RESULT_RECOVERED;
15716
15717 done:
15718         rtnl_unlock();
15719
15720         return rc;
15721 }
15722
15723 /**
15724  * tg3_io_resume - called when traffic can start flowing again.
15725  * @pdev: Pointer to PCI device
15726  *
15727  * This callback is called when the error recovery driver tells
15728  * us that it's OK to resume normal operation.
15729  */
15730 static void tg3_io_resume(struct pci_dev *pdev)
15731 {
15732         struct net_device *netdev = pci_get_drvdata(pdev);
15733         struct tg3 *tp = netdev_priv(netdev);
15734         int err;
15735
15736         rtnl_lock();
15737
15738         if (!netif_running(netdev))
15739                 goto done;
15740
15741         tg3_full_lock(tp, 0);
15742         tg3_flag_set(tp, INIT_COMPLETE);
15743         err = tg3_restart_hw(tp, 1);
15744         tg3_full_unlock(tp);
15745         if (err) {
15746                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15747                 goto done;
15748         }
15749
15750         netif_device_attach(netdev);
15751
15752         tp->timer.expires = jiffies + tp->timer_offset;
15753         add_timer(&tp->timer);
15754
15755         tg3_netif_start(tp);
15756
15757         tg3_phy_start(tp);
15758
15759 done:
15760         rtnl_unlock();
15761 }
15762
15763 static struct pci_error_handlers tg3_err_handler = {
15764         .error_detected = tg3_io_error_detected,
15765         .slot_reset     = tg3_io_slot_reset,
15766         .resume         = tg3_io_resume
15767 };
15768
15769 static struct pci_driver tg3_driver = {
15770         .name           = DRV_MODULE_NAME,
15771         .id_table       = tg3_pci_tbl,
15772         .probe          = tg3_init_one,
15773         .remove         = __devexit_p(tg3_remove_one),
15774         .err_handler    = &tg3_err_handler,
15775         .driver.pm      = TG3_PM_OPS,
15776 };
15777
15778 static int __init tg3_init(void)
15779 {
15780         return pci_register_driver(&tg3_driver);
15781 }
15782
15783 static void __exit tg3_cleanup(void)
15784 {
15785         pci_unregister_driver(&tg3_driver);
15786 }
15787
15788 module_init(tg3_init);
15789 module_exit(tg3_cleanup);