/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     120
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "August 18, 2011"

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
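
/* Illustrative only: because TG3_TX_RING_SIZE is a power of two, the mask
 * in NEXT_TX behaves like a modulo without the divide, e.g.:
 *
 *      NEXT_TX(510) == 511
 *      NEXT_TX(511) == 0       (wraps instead of evaluating 512 % 512)
 */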

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       0
#endif
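
/* A sketch of how the knobs above combine (hypothetical frame sizes): on an
 * arch with efficient unaligned access, TG3_RX_COPY_THRESH() collapses to
 * the 256-byte constant, so a 128-byte frame is double-copied into a fresh
 * skb while a 1500-byte frame is handed up in the original DMA buffer.
 */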

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
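/* Example arithmetic, assuming the default tx_pending of 511: the queue
 * is woken once at least 511 / 4 = 127 descriptors are free again.
 */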
#define TG3_TX_BD_DMA_MAX               4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, used when the GPIOs are toggled to switch
 * power.  TG3PCI_CLOCK_CTRL is another, used when the clock frequencies
 * are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
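
/* Typical usage of the flushing variants (illustrative only):
 *
 *      tw32_f(MAC_MODE, tp->mac_mode);           write, then read back
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40);  ditto, plus 40 usec waits
 */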

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver holds no stale locks. */
        for (i = 0; i < 8; i++) {
                if (i == TG3_APE_LOCK_GPIO)
                        continue;
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
        }

        /* Clear the correct bit of the GPIO lock too. */
        if (!tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
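                /* fall through */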
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_REQ_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
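                /* fall through */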
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
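
/* Illustrative pairing of the two helpers above (a sketch, not new driver
 * logic):
 *
 *      if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *              ... touch APE shared memory ...
 *              tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *      }
 */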

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)
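
/* The pair above brackets DSP register access; a sketch of the usual
 * pattern:
 *
 *      if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *              tg3_phydsp_write(tp, reg, val);
 *              TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *      }
 */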

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

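        /* Poll in roughly 8 usec steps; delay_cnt is the usec budget / 8. */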
        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}
1480
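/* Resolve the pause configuration from the autoneg results (or from
 * the forced settings when autoneg is off) and program the RX/TX MAC
 * mode registers to match.
 */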
1481 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1482 {
1483         u8 autoneg;
1484         u8 flowctrl = 0;
1485         u32 old_rx_mode = tp->rx_mode;
1486         u32 old_tx_mode = tp->tx_mode;
1487
1488         if (tg3_flag(tp, USE_PHYLIB))
1489                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1490         else
1491                 autoneg = tp->link_config.autoneg;
1492
1493         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1494                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1495                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1496                 else
1497                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1498         } else
1499                 flowctrl = tp->link_config.flowctrl;
1500
1501         tp->link_config.active_flowctrl = flowctrl;
1502
1503         if (flowctrl & FLOW_CTRL_RX)
1504                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1505         else
1506                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1507
1508         if (old_rx_mode != tp->rx_mode)
1509                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1510
1511         if (flowctrl & FLOW_CTRL_TX)
1512                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1513         else
1514                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1515
1516         if (old_tx_mode != tp->tx_mode)
1517                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1518 }
1519
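/* phylib link-change callback.  Reconciles the MAC mode, MI status,
 * TX timing, and flow control registers with the PHY state, then logs
 * the change if anything of interest moved.
 */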
1520 static void tg3_adjust_link(struct net_device *dev)
1521 {
1522         u8 oldflowctrl, linkmesg = 0;
1523         u32 mac_mode, lcl_adv, rmt_adv;
1524         struct tg3 *tp = netdev_priv(dev);
1525         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1526
1527         spin_lock_bh(&tp->lock);
1528
1529         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1530                                     MAC_MODE_HALF_DUPLEX);
1531
1532         oldflowctrl = tp->link_config.active_flowctrl;
1533
1534         if (phydev->link) {
1535                 lcl_adv = 0;
1536                 rmt_adv = 0;
1537
1538                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1539                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1540                 else if (phydev->speed == SPEED_1000 ||
1541                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1542                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1543                 else
1544                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1545
1546                 if (phydev->duplex == DUPLEX_HALF)
1547                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1548                 else {
1549                         lcl_adv = tg3_advert_flowctrl_1000T(
1550                                   tp->link_config.flowctrl);
1551
1552                         if (phydev->pause)
1553                                 rmt_adv = LPA_PAUSE_CAP;
1554                         if (phydev->asym_pause)
1555                                 rmt_adv |= LPA_PAUSE_ASYM;
1556                 }
1557
1558                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1559         } else
1560                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1561
1562         if (mac_mode != tp->mac_mode) {
1563                 tp->mac_mode = mac_mode;
1564                 tw32_f(MAC_MODE, tp->mac_mode);
1565                 udelay(40);
1566         }
1567
1568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1569                 if (phydev->speed == SPEED_10)
1570                         tw32(MAC_MI_STAT,
1571                              MAC_MI_STAT_10MBPS_MODE |
1572                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1573                 else
1574                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1575         }
1576
1577         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1578                 tw32(MAC_TX_LENGTHS,
1579                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1580                       (6 << TX_LENGTHS_IPG_SHIFT) |
1581                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1582         else
1583                 tw32(MAC_TX_LENGTHS,
1584                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1585                       (6 << TX_LENGTHS_IPG_SHIFT) |
1586                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1587
1588         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1589             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1590             phydev->speed != tp->link_config.active_speed ||
1591             phydev->duplex != tp->link_config.active_duplex ||
1592             oldflowctrl != tp->link_config.active_flowctrl)
1593                 linkmesg = 1;
1594
1595         tp->link_config.active_speed = phydev->speed;
1596         tp->link_config.active_duplex = phydev->duplex;
1597
1598         spin_unlock_bh(&tp->lock);
1599
1600         if (linkmesg)
1601                 tg3_link_report(tp);
1602 }
1603
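/* Reset the PHY to a known state and attach it to the MAC through
 * phylib, masking the advertised feature set down to what the MAC
 * interface mode actually supports.
 */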
1604 static int tg3_phy_init(struct tg3 *tp)
1605 {
1606         struct phy_device *phydev;
1607
1608         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1609                 return 0;
1610
1611         /* Bring the PHY back to a known state. */
1612         tg3_bmcr_reset(tp);
1613
1614         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1615
1616         /* Attach the MAC to the PHY. */
1617         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1618                              phydev->dev_flags, phydev->interface);
1619         if (IS_ERR(phydev)) {
1620                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1621                 return PTR_ERR(phydev);
1622         }
1623
1624         /* Mask with MAC supported features. */
1625         switch (phydev->interface) {
1626         case PHY_INTERFACE_MODE_GMII:
1627         case PHY_INTERFACE_MODE_RGMII:
1628                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1629                         phydev->supported &= (PHY_GBIT_FEATURES |
1630                                               SUPPORTED_Pause |
1631                                               SUPPORTED_Asym_Pause);
1632                         break;
1633                 }
1634                 /* fallthru */
1635         case PHY_INTERFACE_MODE_MII:
1636                 phydev->supported &= (PHY_BASIC_FEATURES |
1637                                       SUPPORTED_Pause |
1638                                       SUPPORTED_Asym_Pause);
1639                 break;
1640         default:
1641                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1642                 return -EINVAL;
1643         }
1644
1645         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1646
1647         phydev->advertising = phydev->supported;
1648
1649         return 0;
1650 }
1651
1652 static void tg3_phy_start(struct tg3 *tp)
1653 {
1654         struct phy_device *phydev;
1655
1656         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1657                 return;
1658
1659         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1660
1661         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1662                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1663                 phydev->speed = tp->link_config.orig_speed;
1664                 phydev->duplex = tp->link_config.orig_duplex;
1665                 phydev->autoneg = tp->link_config.orig_autoneg;
1666                 phydev->advertising = tp->link_config.orig_advertising;
1667         }
1668
1669         phy_start(phydev);
1670
1671         phy_start_aneg(phydev);
1672 }
1673
1674 static void tg3_phy_stop(struct tg3 *tp)
1675 {
1676         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1677                 return;
1678
1679         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1680 }
1681
1682 static void tg3_phy_fini(struct tg3 *tp)
1683 {
1684         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1685                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1686                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1687         }
1688 }
1689
1690 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1691 {
1692         int err;
1693         u32 val;
1694
1695         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1696                 return 0;
1697
1698         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1699                 /* Cannot do read-modify-write on 5401 */
1700                 err = tg3_phy_auxctl_write(tp,
1701                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1702                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1703                                            0x4c20);
1704                 goto done;
1705         }
1706
1707         err = tg3_phy_auxctl_read(tp,
1708                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1709         if (err)
1710                 return err;
1711
1712         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1713         err = tg3_phy_auxctl_write(tp,
1714                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1715
1716 done:
1717         return err;
1718 }
1719
1720 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1721 {
1722         u32 phytest;
1723
1724         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1725                 u32 phy;
1726
1727                 tg3_writephy(tp, MII_TG3_FET_TEST,
1728                              phytest | MII_TG3_FET_SHADOW_EN);
1729                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1730                         if (enable)
1731                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1732                         else
1733                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1734                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1735                 }
1736                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1737         }
1738 }
1739
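/* Enable or disable the PHY's auto power-down (APD) feature through
 * the misc shadow registers.  FET-style PHYs use a different shadow
 * layout and are handled by tg3_phy_fet_toggle_apd() above.
 */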
1740 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1741 {
1742         u32 reg;
1743
1744         if (!tg3_flag(tp, 5705_PLUS) ||
1745             (tg3_flag(tp, 5717_PLUS) &&
1746              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1747                 return;
1748
1749         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1750                 tg3_phy_fet_toggle_apd(tp, enable);
1751                 return;
1752         }
1753
1754         reg = MII_TG3_MISC_SHDW_WREN |
1755               MII_TG3_MISC_SHDW_SCR5_SEL |
1756               MII_TG3_MISC_SHDW_SCR5_LPED |
1757               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1758               MII_TG3_MISC_SHDW_SCR5_SDTL |
1759               MII_TG3_MISC_SHDW_SCR5_C125OE;
1760         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1761                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1762
1763         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1764
1766         reg = MII_TG3_MISC_SHDW_WREN |
1767               MII_TG3_MISC_SHDW_APD_SEL |
1768               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1769         if (enable)
1770                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1771
1772         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1773 }
1774
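/* Enable or disable automatic MDI/MDI-X crossover detection on the
 * copper PHY.  This is a no-op on serdes links and pre-5705 chips.
 */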
1775 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1776 {
1777         u32 phy;
1778
1779         if (!tg3_flag(tp, 5705_PLUS) ||
1780             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1781                 return;
1782
1783         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1784                 u32 ephy;
1785
1786                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1787                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1788
1789                         tg3_writephy(tp, MII_TG3_FET_TEST,
1790                                      ephy | MII_TG3_FET_SHADOW_EN);
1791                         if (!tg3_readphy(tp, reg, &phy)) {
1792                                 if (enable)
1793                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1794                                 else
1795                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1796                                 tg3_writephy(tp, reg, phy);
1797                         }
1798                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1799                 }
1800         } else {
1801                 int ret;
1802
1803                 ret = tg3_phy_auxctl_read(tp,
1804                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1805                 if (!ret) {
1806                         if (enable)
1807                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1808                         else
1809                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1810                         tg3_phy_auxctl_write(tp,
1811                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1812                 }
1813         }
1814 }
1815
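/* Turn on Broadcom "ethernet@wirespeed", which lets the PHY fall back
 * to a lower link speed when the cabling cannot sustain 1000BASE-T.
 */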
1816 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1817 {
1818         int ret;
1819         u32 val;
1820
1821         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1822                 return;
1823
1824         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1825         if (!ret)
1826                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1827                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1828 }
1829
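/* Program the PHY DSP tap and amplitude coefficients from the
 * one-time-programmable (OTP) fuse values stashed in tp->phy_otp.
 */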
1830 static void tg3_phy_apply_otp(struct tg3 *tp)
1831 {
1832         u32 otp, phy;
1833
1834         if (!tp->phy_otp)
1835                 return;
1836
1837         otp = tp->phy_otp;
1838
1839         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1840                 return;
1841
1842         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1843         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1844         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1845
1846         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1847               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1848         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1849
1850         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1851         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1852         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1853
1854         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1855         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1856
1857         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1858         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1859
1860         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1861               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1862         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1863
1864         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1865 }
1866
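/* Re-evaluate Energy Efficient Ethernet (EEE) after a link change:
 * set the LPI exit timer for the negotiated speed, and disable LPI
 * entirely when EEE did not resolve with the link partner.
 */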
1867 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1868 {
1869         u32 val;
1870
1871         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1872                 return;
1873
1874         tp->setlpicnt = 0;
1875
1876         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1877             current_link_up == 1 &&
1878             tp->link_config.active_duplex == DUPLEX_FULL &&
1879             (tp->link_config.active_speed == SPEED_100 ||
1880              tp->link_config.active_speed == SPEED_1000)) {
1881                 u32 eeectl;
1882
1883                 if (tp->link_config.active_speed == SPEED_1000)
1884                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1885                 else
1886                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1887
1888                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1889
1890                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1891                                   TG3_CL45_D7_EEERES_STAT, &val);
1892
1893                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1894                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1895                         tp->setlpicnt = 2;
1896         }
1897
1898         if (!tp->setlpicnt) {
1899                 if (current_link_up == 1 &&
1900                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1901                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1902                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1903                 }
1904
1905                 val = tr32(TG3_CPMU_EEE_MODE);
1906                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1907         }
1908 }
1909
1910 static void tg3_phy_eee_enable(struct tg3 *tp)
1911 {
1912         u32 val;
1913
1914         if (tp->link_config.active_speed == SPEED_1000 &&
1915             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1916              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1917              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1918             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1919                 val = MII_TG3_DSP_TAP26_ALNOKO |
1920                       MII_TG3_DSP_TAP26_RMRXSTO;
1921                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
1922                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1923         }
1924
1925         val = tr32(TG3_CPMU_EEE_MODE);
1926         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1927 }
1928
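/* Spin until the DSP reports that the previous macro operation has
 * finished (busy bit 0x1000 in MII_TG3_DSP_CONTROL clears), giving up
 * after 100 reads.
 */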
1929 static int tg3_wait_macro_done(struct tg3 *tp)
1930 {
1931         int limit = 100;
1932
1933         while (limit--) {
1934                 u32 tmp32;
1935
1936                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1937                         if ((tmp32 & 0x1000) == 0)
1938                                 break;
1939                 }
1940         }
1941         if (limit < 0)
1942                 return -EBUSY;
1943
1944         return 0;
1945 }
1946
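/* Write a known test pattern into each of the four DSP channels and
 * read it back.  A stuck macro engine flags the PHY for another reset
 * via *resetp; a pattern mismatch fails the attempt with -EBUSY.
 */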
1947 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1948 {
1949         static const u32 test_pat[4][6] = {
1950         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1951         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1952         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1953         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1954         };
1955         int chan;
1956
1957         for (chan = 0; chan < 4; chan++) {
1958                 int i;
1959
1960                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1961                              (chan * 0x2000) | 0x0200);
1962                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1963
1964                 for (i = 0; i < 6; i++)
1965                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1966                                      test_pat[chan][i]);
1967
1968                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1969                 if (tg3_wait_macro_done(tp)) {
1970                         *resetp = 1;
1971                         return -EBUSY;
1972                 }
1973
1974                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1975                              (chan * 0x2000) | 0x0200);
1976                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1977                 if (tg3_wait_macro_done(tp)) {
1978                         *resetp = 1;
1979                         return -EBUSY;
1980                 }
1981
1982                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1983                 if (tg3_wait_macro_done(tp)) {
1984                         *resetp = 1;
1985                         return -EBUSY;
1986                 }
1987
1988                 for (i = 0; i < 6; i += 2) {
1989                         u32 low, high;
1990
1991                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1992                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1993                             tg3_wait_macro_done(tp)) {
1994                                 *resetp = 1;
1995                                 return -EBUSY;
1996                         }
1997                         low &= 0x7fff;
1998                         high &= 0x000f;
1999                         if (low != test_pat[chan][i] ||
2000                             high != test_pat[chan][i+1]) {
2001                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2002                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2003                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2004
2005                                 return -EBUSY;
2006                         }
2007                 }
2008         }
2009
2010         return 0;
2011 }
2012
2013 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2014 {
2015         int chan;
2016
2017         for (chan = 0; chan < 4; chan++) {
2018                 int i;
2019
2020                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2021                              (chan * 0x2000) | 0x0200);
2022                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2023                 for (i = 0; i < 6; i++)
2024                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2025                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2026                 if (tg3_wait_macro_done(tp))
2027                         return -EBUSY;
2028         }
2029
2030         return 0;
2031 }
2032
2033 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2034 {
2035         u32 reg32, phy9_orig;
2036         int retries, do_phy_reset, err;
2037
2038         retries = 10;
2039         do_phy_reset = 1;
2040         do {
2041                 if (do_phy_reset) {
2042                         err = tg3_bmcr_reset(tp);
2043                         if (err)
2044                                 return err;
2045                         do_phy_reset = 0;
2046                 }
2047
2048                 /* Disable transmitter and interrupt.  */
2049                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2050                         continue;
2051
2052                 reg32 |= 0x3000;
2053                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2054
2055                 /* Set full-duplex, 1000 Mbps.  */
2056                 tg3_writephy(tp, MII_BMCR,
2057                              BMCR_FULLDPLX | BMCR_SPEED1000);
2058
2059                 /* Set to master mode.  */
2060                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2061                         continue;
2062
2063                 tg3_writephy(tp, MII_CTRL1000,
2064                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2065
2066                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2067                 if (err)
2068                         return err;
2069
2070                 /* Block the PHY control access.  */
2071                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2072
2073                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2074                 if (!err)
2075                         break;
2076         } while (--retries);
2077
2078         err = tg3_phy_reset_chanpat(tp);
2079         if (err)
2080                 return err;
2081
2082         tg3_phydsp_write(tp, 0x8005, 0x0000);
2083
2084         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2085         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2086
2087         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2088
2089         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2090
2091         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2092                 reg32 &= ~0x3000;
2093                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2094         } else if (!err)
2095                 err = -EBUSY;
2096
2097         return err;
2098 }
2099
2100 /* Reset the tigon3 PHY and reapply the chip-specific DSP,
2101  * auxctl, and FIFO-elasticity workarounds afterwards.
2102  */
2103 static int tg3_phy_reset(struct tg3 *tp)
2104 {
2105         u32 val, cpmuctrl;
2106         int err;
2107
2108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2109                 val = tr32(GRC_MISC_CFG);
2110                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2111                 udelay(40);
2112         }
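        /* MII_BMSR latches link-down events; read it twice so the
         * second read reflects the current link state.
         */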
2113         err  = tg3_readphy(tp, MII_BMSR, &val);
2114         err |= tg3_readphy(tp, MII_BMSR, &val);
2115         if (err != 0)
2116                 return -EBUSY;
2117
2118         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2119                 netif_carrier_off(tp->dev);
2120                 tg3_link_report(tp);
2121         }
2122
2123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2124             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2125             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2126                 err = tg3_phy_reset_5703_4_5(tp);
2127                 if (err)
2128                         return err;
2129                 goto out;
2130         }
2131
2132         cpmuctrl = 0;
2133         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2134             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2135                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2136                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2137                         tw32(TG3_CPMU_CTRL,
2138                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2139         }
2140
2141         err = tg3_bmcr_reset(tp);
2142         if (err)
2143                 return err;
2144
2145         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2146                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2147                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2148
2149                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2150         }
2151
2152         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2153             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2154                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2155                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2156                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2157                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2158                         udelay(40);
2159                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2160                 }
2161         }
2162
2163         if (tg3_flag(tp, 5717_PLUS) &&
2164             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2165                 return 0;
2166
2167         tg3_phy_apply_otp(tp);
2168
2169         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2170                 tg3_phy_toggle_apd(tp, true);
2171         else
2172                 tg3_phy_toggle_apd(tp, false);
2173
2174 out:
2175         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2176             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2177                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2178                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2179                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2180         }
2181
2182         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2183                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2184                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2185         }
2186
2187         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2188                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2189                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2190                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2191                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2192                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2193                 }
2194         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2195                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2196                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2197                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2198                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2199                                 tg3_writephy(tp, MII_TG3_TEST1,
2200                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2201                         } else
2202                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2203
2204                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2205                 }
2206         }
2207
2208         /* Set the extended packet length bit (bit 14) on all chips
2209          * that support jumbo frames. */
2210         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2211                 /* Cannot do read-modify-write on 5401 */
2212                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2213         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2214                 /* Set bit 14 with read-modify-write to preserve other bits */
2215                 err = tg3_phy_auxctl_read(tp,
2216                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2217                 if (!err)
2218                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2219                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2220         }
2221
2222         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2223          * jumbo frames transmission.
2224          */
2225         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2226                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2227                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2228                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2229         }
2230
2231         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2232                 /* adjust output voltage */
2233                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2234         }
2235
2236         tg3_phy_toggle_automdix(tp, 1);
2237         tg3_phy_set_wirespeed(tp);
2238         return 0;
2239 }
2240
2241 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2242 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2243 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2244                                           TG3_GPIO_MSG_NEED_VAUX)
2245 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2246         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2247          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2248          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2249          (TG3_GPIO_MSG_DRVR_PRES << 12))
2250
2251 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2252         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2253          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2254          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2255          (TG3_GPIO_MSG_NEED_VAUX << 12))
2256
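/* Each PCI function owns a 4-bit field in the shared GPIO message
 * word (kept in the APE scratchpad on 5717/5719, otherwise in
 * TG3_CPMU_DRV_STATUS).  Update this function's field and return the
 * whole word so the caller can see what the other functions need.
 */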
2257 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2258 {
2259         u32 status, shift;
2260
2261         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2262             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2263                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2264         else
2265                 status = tr32(TG3_CPMU_DRV_STATUS);
2266
2267         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2268         status &= ~(TG3_GPIO_MSG_MASK << shift);
2269         status |= (newstat << shift);
2270
2271         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2272             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2273                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2274         else
2275                 tw32(TG3_CPMU_DRV_STATUS, status);
2276
2277         return status >> TG3_APE_GPIO_MSG_SHIFT;
2278 }
2279
2280 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2281 {
2282         if (!tg3_flag(tp, IS_NIC))
2283                 return 0;
2284
2285         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2286             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2287             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2288                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2289                         return -EIO;
2290
2291                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2292
2293                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2294                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2295
2296                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2297         } else {
2298                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2299                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2300         }
2301
2302         return 0;
2303 }
2304
2305 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2306 {
2307         u32 grc_local_ctrl;
2308
2309         if (!tg3_flag(tp, IS_NIC) ||
2310             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2311             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2312                 return;
2313
2314         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2315
2316         tw32_wait_f(GRC_LOCAL_CTRL,
2317                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2318                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2319
2320         tw32_wait_f(GRC_LOCAL_CTRL,
2321                     grc_local_ctrl,
2322                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2323
2324         tw32_wait_f(GRC_LOCAL_CTRL,
2325                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2326                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2327 }
2328
2329 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2330 {
2331         if (!tg3_flag(tp, IS_NIC))
2332                 return;
2333
2334         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2335             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2336                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2337                             (GRC_LCLCTRL_GPIO_OE0 |
2338                              GRC_LCLCTRL_GPIO_OE1 |
2339                              GRC_LCLCTRL_GPIO_OE2 |
2340                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2341                              GRC_LCLCTRL_GPIO_OUTPUT1),
2342                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2343         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2344                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2345                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2346                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2347                                      GRC_LCLCTRL_GPIO_OE1 |
2348                                      GRC_LCLCTRL_GPIO_OE2 |
2349                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2350                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2351                                      tp->grc_local_ctrl;
2352                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2353                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2354
2355                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2356                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2357                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2358
2359                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2360                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2361                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2362         } else {
2363                 u32 no_gpio2;
2364                 u32 grc_local_ctrl = 0;
2365
2366                 /* Workaround to avoid drawing too much current. */
2367                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2368                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2369                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2370                                     grc_local_ctrl,
2371                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2372                 }
2373
2374                 /* On 5753 and variants, GPIO2 cannot be used. */
2375                 no_gpio2 = tp->nic_sram_data_cfg &
2376                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2377
2378                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2379                                   GRC_LCLCTRL_GPIO_OE1 |
2380                                   GRC_LCLCTRL_GPIO_OE2 |
2381                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2382                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2383                 if (no_gpio2) {
2384                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2385                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2386                 }
2387                 tw32_wait_f(GRC_LOCAL_CTRL,
2388                             tp->grc_local_ctrl | grc_local_ctrl,
2389                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2390
2391                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2392
2393                 tw32_wait_f(GRC_LOCAL_CTRL,
2394                             tp->grc_local_ctrl | grc_local_ctrl,
2395                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2396
2397                 if (!no_gpio2) {
2398                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2399                         tw32_wait_f(GRC_LOCAL_CTRL,
2400                                     tp->grc_local_ctrl | grc_local_ctrl,
2401                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2402                 }
2403         }
2404 }
2405
2406 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2407 {
2408         u32 msg = 0;
2409
2410         /* Serialize power state transitions */
2411         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2412                 return;
2413
2414         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2415                 msg = TG3_GPIO_MSG_NEED_VAUX;
2416
2417         msg = tg3_set_function_status(tp, msg);
2418
2419         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2420                 goto done;
2421
2422         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2423                 tg3_pwrsrc_switch_to_vaux(tp);
2424         else
2425                 tg3_pwrsrc_die_with_vmain(tp);
2426
2427 done:
2428         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2429 }
2430
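/* Decide whether this NIC (and, on two-port boards, its peer
 * function) still needs auxiliary power for WoL or ASF, and switch
 * the power source accordingly.
 */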
2431 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2432 {
2433         bool need_vaux = false;
2434
2435         /* The GPIOs do something completely different on 57765. */
2436         if (!tg3_flag(tp, IS_NIC) ||
2437             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2438                 return;
2439
2440         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2441             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2442             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2443                 tg3_frob_aux_power_5717(tp, include_wol ?
2444                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2445                 return;
2446         }
2447
2448         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2449                 struct net_device *dev_peer;
2450
2451                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2452
2453                 /* remove_one() may have been run on the peer. */
2454                 if (dev_peer) {
2455                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2456
2457                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2458                                 return;
2459
2460                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2461                             tg3_flag(tp_peer, ENABLE_ASF))
2462                                 need_vaux = true;
2463                 }
2464         }
2465
2466         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2467             tg3_flag(tp, ENABLE_ASF))
2468                 need_vaux = true;
2469
2470         if (need_vaux)
2471                 tg3_pwrsrc_switch_to_vaux(tp);
2472         else
2473                 tg3_pwrsrc_die_with_vmain(tp);
2474 }
2475
2476 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2477 {
2478         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2479                 return 1;
2480         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2481                 if (speed != SPEED_10)
2482                         return 1;
2483         } else if (speed == SPEED_10)
2484                 return 1;
2485
2486         return 0;
2487 }
2488
2489 static int tg3_setup_phy(struct tg3 *, int);
2490
2491 #define RESET_KIND_SHUTDOWN     0
2492 #define RESET_KIND_INIT         1
2493 #define RESET_KIND_SUSPEND      2
2494
2495 static void tg3_write_sig_post_reset(struct tg3 *, int);
2496 static int tg3_halt_cpu(struct tg3 *, u32);
2497
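/* Put the PHY into its lowest safe power state, honoring the per-chip
 * errata that forbid a full power-down on some parts.
 */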
2498 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2499 {
2500         u32 val;
2501
2502         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2503                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2504                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2505                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2506
2507                         sg_dig_ctrl |=
2508                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2509                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2510                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2511                 }
2512                 return;
2513         }
2514
2515         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2516                 tg3_bmcr_reset(tp);
2517                 val = tr32(GRC_MISC_CFG);
2518                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2519                 udelay(40);
2520                 return;
2521         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2522                 u32 phytest;
2523                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2524                         u32 phy;
2525
2526                         tg3_writephy(tp, MII_ADVERTISE, 0);
2527                         tg3_writephy(tp, MII_BMCR,
2528                                      BMCR_ANENABLE | BMCR_ANRESTART);
2529
2530                         tg3_writephy(tp, MII_TG3_FET_TEST,
2531                                      phytest | MII_TG3_FET_SHADOW_EN);
2532                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2533                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2534                                 tg3_writephy(tp,
2535                                              MII_TG3_FET_SHDW_AUXMODE4,
2536                                              phy);
2537                         }
2538                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2539                 }
2540                 return;
2541         } else if (do_low_power) {
2542                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2543                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2544
2545                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2546                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2547                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2548                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2549         }
2550
2551         /* The PHY should not be powered down on some chips because
2552          * of bugs.
2553          */
2554         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2555             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2556             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2557              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2558                 return;
2559
2560         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2561             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2562                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2563                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2564                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2565                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2566         }
2567
2568         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2569 }
2570
2571 /* tp->lock is held. */
2572 static int tg3_nvram_lock(struct tg3 *tp)
2573 {
2574         if (tg3_flag(tp, NVRAM)) {
2575                 int i;
2576
2577                 if (tp->nvram_lock_cnt == 0) {
2578                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2579                         for (i = 0; i < 8000; i++) {
2580                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2581                                         break;
2582                                 udelay(20);
2583                         }
2584                         if (i == 8000) {
2585                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2586                                 return -ENODEV;
2587                         }
2588                 }
2589                 tp->nvram_lock_cnt++;
2590         }
2591         return 0;
2592 }
2593
2594 /* tp->lock is held. */
2595 static void tg3_nvram_unlock(struct tg3 *tp)
2596 {
2597         if (tg3_flag(tp, NVRAM)) {
2598                 if (tp->nvram_lock_cnt > 0)
2599                         tp->nvram_lock_cnt--;
2600                 if (tp->nvram_lock_cnt == 0)
2601                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2602         }
2603 }
2604
2605 /* tp->lock is held. */
2606 static void tg3_enable_nvram_access(struct tg3 *tp)
2607 {
2608         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2609                 u32 nvaccess = tr32(NVRAM_ACCESS);
2610
2611                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2612         }
2613 }
2614
2615 /* tp->lock is held. */
2616 static void tg3_disable_nvram_access(struct tg3 *tp)
2617 {
2618         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2619                 u32 nvaccess = tr32(NVRAM_ACCESS);
2620
2621                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2622         }
2623 }
2624
2625 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2626                                         u32 offset, u32 *val)
2627 {
2628         u32 tmp;
2629         int i;
2630
2631         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2632                 return -EINVAL;
2633
2634         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2635                                         EEPROM_ADDR_DEVID_MASK |
2636                                         EEPROM_ADDR_READ);
2637         tw32(GRC_EEPROM_ADDR,
2638              tmp |
2639              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2640              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2641               EEPROM_ADDR_ADDR_MASK) |
2642              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2643
2644         for (i = 0; i < 1000; i++) {
2645                 tmp = tr32(GRC_EEPROM_ADDR);
2646
2647                 if (tmp & EEPROM_ADDR_COMPLETE)
2648                         break;
2649                 msleep(1);
2650         }
2651         if (!(tmp & EEPROM_ADDR_COMPLETE))
2652                 return -EBUSY;
2653
2654         tmp = tr32(GRC_EEPROM_DATA);
2655
2656         /*
2657          * The data will always be opposite the native endian
2658          * format.  Perform a blind byteswap to compensate.
2659          */
2660         *val = swab32(tmp);
2661
2662         return 0;
2663 }
2664
2665 #define NVRAM_CMD_TIMEOUT 10000
2666
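/* Kick off an NVRAM controller command and poll for completion.
 * 10000 polls of 10 usec apiece bound the wait at roughly 100 msec
 * before giving up with -EBUSY.
 */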
2667 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2668 {
2669         int i;
2670
2671         tw32(NVRAM_CMD, nvram_cmd);
2672         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2673                 udelay(10);
2674                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2675                         udelay(10);
2676                         break;
2677                 }
2678         }
2679
2680         if (i == NVRAM_CMD_TIMEOUT)
2681                 return -EBUSY;
2682
2683         return 0;
2684 }
2685
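/* Convert a linear NVRAM offset into the page/offset form expected by
 * Atmel AT45DB0x1B flashes, whose page size is not a power of two and
 * so cannot be derived by simple masking.
 */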
2686 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2687 {
2688         if (tg3_flag(tp, NVRAM) &&
2689             tg3_flag(tp, NVRAM_BUFFERED) &&
2690             tg3_flag(tp, FLASH) &&
2691             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2692             (tp->nvram_jedecnum == JEDEC_ATMEL))
2693
2694                 addr = ((addr / tp->nvram_pagesize) <<
2695                         ATMEL_AT45DB0X1B_PAGE_POS) +
2696                        (addr % tp->nvram_pagesize);
2697
2698         return addr;
2699 }
2700
2701 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2702 {
2703         if (tg3_flag(tp, NVRAM) &&
2704             tg3_flag(tp, NVRAM_BUFFERED) &&
2705             tg3_flag(tp, FLASH) &&
2706             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2707             (tp->nvram_jedecnum == JEDEC_ATMEL))
2708
2709                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2710                         tp->nvram_pagesize) +
2711                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2712
2713         return addr;
2714 }
2715
2716 /* NOTE: Data read in from NVRAM is byteswapped according to
2717  * the byteswapping settings for all other register accesses.
2718  * tg3 devices are BE devices, so on a BE machine, the data
2719  * returned will be exactly as it is seen in NVRAM.  On a LE
2720  * machine, the 32-bit value will be byteswapped.
2721  */
2722 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2723 {
2724         int ret;
2725
2726         if (!tg3_flag(tp, NVRAM))
2727                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2728
2729         offset = tg3_nvram_phys_addr(tp, offset);
2730
2731         if (offset > NVRAM_ADDR_MSK)
2732                 return -EINVAL;
2733
2734         ret = tg3_nvram_lock(tp);
2735         if (ret)
2736                 return ret;
2737
2738         tg3_enable_nvram_access(tp);
2739
2740         tw32(NVRAM_ADDR, offset);
2741         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2742                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2743
2744         if (ret == 0)
2745                 *val = tr32(NVRAM_RDDATA);
2746
2747         tg3_disable_nvram_access(tp);
2748
2749         tg3_nvram_unlock(tp);
2750
2751         return ret;
2752 }
2753
2754 /* Ensures NVRAM data is in bytestream format. */
2755 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2756 {
2757         u32 v;
2758         int res = tg3_nvram_read(tp, offset, &v);
2759         if (!res)
2760                 *val = cpu_to_be32(v);
2761         return res;
2762 }
2763
2764 /* tp->lock is held. */
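/* Program the station address into all four MAC address slots (plus
 * the twelve extended slots on 5703/5704) and reseed the TX backoff
 * generator, optionally leaving slot 1 untouched (skip_mac_1) for
 * firmware that manages it.
 */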
2765 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2766 {
2767         u32 addr_high, addr_low;
2768         int i;
2769
2770         addr_high = ((tp->dev->dev_addr[0] << 8) |
2771                      tp->dev->dev_addr[1]);
2772         addr_low = ((tp->dev->dev_addr[2] << 24) |
2773                     (tp->dev->dev_addr[3] << 16) |
2774                     (tp->dev->dev_addr[4] <<  8) |
2775                     (tp->dev->dev_addr[5] <<  0));
2776         for (i = 0; i < 4; i++) {
2777                 if (i == 1 && skip_mac_1)
2778                         continue;
2779                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2780                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2781         }
2782
2783         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2784             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2785                 for (i = 0; i < 12; i++) {
2786                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2787                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2788                 }
2789         }
2790
2791         addr_high = (tp->dev->dev_addr[0] +
2792                      tp->dev->dev_addr[1] +
2793                      tp->dev->dev_addr[2] +
2794                      tp->dev->dev_addr[3] +
2795                      tp->dev->dev_addr[4] +
2796                      tp->dev->dev_addr[5]) &
2797                 TX_BACKOFF_SEED_MASK;
2798         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2799 }
2800
2801 static void tg3_enable_register_access(struct tg3 *tp)
2802 {
2803         /*
2804          * Make sure register accesses (indirect or otherwise) will function
2805          * correctly.
2806          */
2807         pci_write_config_dword(tp->pdev,
2808                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2809 }
2810
2811 static int tg3_power_up(struct tg3 *tp)
2812 {
2813         int err;
2814
2815         tg3_enable_register_access(tp);
2816
2817         err = pci_set_power_state(tp->pdev, PCI_D0);
2818         if (!err) {
2819                 /* Switch out of Vaux if it is a NIC */
2820                 tg3_pwrsrc_switch_to_vmain(tp);
2821         } else {
2822                 netdev_err(tp->dev, "Transition to D0 failed\n");
2823         }
2824
2825         return err;
2826 }
2827
2828 static int tg3_power_down_prepare(struct tg3 *tp)
2829 {
2830         u32 misc_host_ctrl;
2831         bool device_should_wake, do_low_power;
2832
2833         tg3_enable_register_access(tp);
2834
2835         /* Restore the CLKREQ setting. */
2836         if (tg3_flag(tp, CLKREQ_BUG)) {
2837                 u16 lnkctl;
2838
2839                 pci_read_config_word(tp->pdev,
2840                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2841                                      &lnkctl);
2842                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2843                 pci_write_config_word(tp->pdev,
2844                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2845                                       lnkctl);
2846         }
2847
2848         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2849         tw32(TG3PCI_MISC_HOST_CTRL,
2850              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2851
2852         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2853                              tg3_flag(tp, WOL_ENABLE);
2854
2855         if (tg3_flag(tp, USE_PHYLIB)) {
2856                 do_low_power = false;
2857                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2858                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2859                         struct phy_device *phydev;
2860                         u32 phyid, advertising;
2861
2862                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2863
2864                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2865
2866                         tp->link_config.orig_speed = phydev->speed;
2867                         tp->link_config.orig_duplex = phydev->duplex;
2868                         tp->link_config.orig_autoneg = phydev->autoneg;
2869                         tp->link_config.orig_advertising = phydev->advertising;
2870
2871                         advertising = ADVERTISED_TP |
2872                                       ADVERTISED_Pause |
2873                                       ADVERTISED_Autoneg |
2874                                       ADVERTISED_10baseT_Half;
2875
2876                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2877                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2878                                         advertising |=
2879                                                 ADVERTISED_100baseT_Half |
2880                                                 ADVERTISED_100baseT_Full |
2881                                                 ADVERTISED_10baseT_Full;
2882                                 else
2883                                         advertising |= ADVERTISED_10baseT_Full;
2884                         }
2885
2886                         phydev->advertising = advertising;
2887
2888                         phy_start_aneg(phydev);
2889
2890                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2891                         if (phyid != PHY_ID_BCMAC131) {
2892                                 phyid &= PHY_BCM_OUI_MASK;
2893                                 if (phyid == PHY_BCM_OUI_1 ||
2894                                     phyid == PHY_BCM_OUI_2 ||
2895                                     phyid == PHY_BCM_OUI_3)
2896                                         do_low_power = true;
2897                         }
2898                 }
2899         } else {
2900                 do_low_power = true;
2901
2902                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2903                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2904                         tp->link_config.orig_speed = tp->link_config.speed;
2905                         tp->link_config.orig_duplex = tp->link_config.duplex;
2906                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2907                 }
2908
2909                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2910                         tp->link_config.speed = SPEED_10;
2911                         tp->link_config.duplex = DUPLEX_HALF;
2912                         tp->link_config.autoneg = AUTONEG_ENABLE;
2913                         tg3_setup_phy(tp, 0);
2914                 }
2915         }
2916
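             /* On 5906, set the VCPU's DISABLE_WOL control bit.  On other
              * chips without ASF, give the firmware up to ~200 ms to post
              * its magic value in the status mailbox before we continue.
              */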
2917         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2918                 u32 val;
2919
2920                 val = tr32(GRC_VCPU_EXT_CTRL);
2921                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2922         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2923                 int i;
2924                 u32 val;
2925
2926                 for (i = 0; i < 200; i++) {
2927                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2928                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2929                                 break;
2930                         msleep(1);
2931                 }
2932         }
2933         if (tg3_flag(tp, WOL_CAP))
2934                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2935                                                      WOL_DRV_STATE_SHUTDOWN |
2936                                                      WOL_DRV_WOL |
2937                                                      WOL_SET_MAGIC_PKT);
2938
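             /* WOL is armed: leave the MAC running in a minimal mode so
              * magic packets can still be received while the chip sleeps.
              */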
2939         if (device_should_wake) {
2940                 u32 mac_mode;
2941
2942                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2943                         if (do_low_power &&
2944                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2945                                 tg3_phy_auxctl_write(tp,
2946                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2947                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2948                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2949                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2950                                 udelay(40);
2951                         }
2952
2953                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2954                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2955                         else
2956                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2957
2958                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2959                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2960                             ASIC_REV_5700) {
2961                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2962                                              SPEED_100 : SPEED_10;
2963                                 if (tg3_5700_link_polarity(tp, speed))
2964                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2965                                 else
2966                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2967                         }
2968                 } else {
2969                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2970                 }
2971
2972                 if (!tg3_flag(tp, 5750_PLUS))
2973                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2974
2975                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2976                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2977                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2978                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2979
2980                 if (tg3_flag(tp, ENABLE_APE))
2981                         mac_mode |= MAC_MODE_APE_TX_EN |
2982                                     MAC_MODE_APE_RX_EN |
2983                                     MAC_MODE_TDE_ENABLE;
2984
2985                 tw32_f(MAC_MODE, mac_mode);
2986                 udelay(100);
2987
2988                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2989                 udelay(10);
2990         }
2991
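             /* Slow or gate the core clocks where this chip generation
              * allows it; 5780-class, CPMU-equipped, and 5906 parts need
              * no help here.
              */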
2992         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2993             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2994              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2995                 u32 base_val;
2996
2997                 base_val = tp->pci_clock_ctrl;
2998                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2999                              CLOCK_CTRL_TXCLK_DISABLE);
3000
3001                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3002                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3003         } else if (tg3_flag(tp, 5780_CLASS) ||
3004                    tg3_flag(tp, CPMU_PRESENT) ||
3005                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3006                 /* do nothing */
3007         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3008                 u32 newbits1, newbits2;
3009
3010                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3011                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3012                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3013                                     CLOCK_CTRL_TXCLK_DISABLE |
3014                                     CLOCK_CTRL_ALTCLK);
3015                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3016                 } else if (tg3_flag(tp, 5705_PLUS)) {
3017                         newbits1 = CLOCK_CTRL_625_CORE;
3018                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3019                 } else {
3020                         newbits1 = CLOCK_CTRL_ALTCLK;
3021                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3022                 }
3023
3024                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3025                             40);
3026
3027                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3028                             40);
3029
3030                 if (!tg3_flag(tp, 5705_PLUS)) {
3031                         u32 newbits3;
3032
3033                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3034                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3035                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3036                                             CLOCK_CTRL_TXCLK_DISABLE |
3037                                             CLOCK_CTRL_44MHZ_CORE);
3038                         } else {
3039                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3040                         }
3041
3042                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3043                                     tp->pci_clock_ctrl | newbits3, 40);
3044                 }
3045         }
3046
3047         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3048                 tg3_power_down_phy(tp, do_low_power);
3049
3050         tg3_frob_aux_power(tp, true);
3051
3052         /* Workaround for unstable PLL clock */
3053         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3054             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3055                 u32 val = tr32(0x7d00);
3056
3057                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3058                 tw32(0x7d00, val);
3059                 if (!tg3_flag(tp, ENABLE_ASF)) {
3060                         int err;
3061
3062                         err = tg3_nvram_lock(tp);
3063                         tg3_halt_cpu(tp, RX_CPU_BASE);
3064                         if (!err)
3065                                 tg3_nvram_unlock(tp);
3066                 }
3067         }
3068
3069         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3070
3071         return 0;
3072 }
3073
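     /* Final power-down: prepare the chip, arm PCI wake if WOL is
      * enabled, then put the device into D3hot.
      */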
3074 static void tg3_power_down(struct tg3 *tp)
3075 {
3076         tg3_power_down_prepare(tp);
3077
3078         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3079         pci_set_power_state(tp->pdev, PCI_D3hot);
3080 }
3081
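     /* Decode the PHY aux status register into a speed/duplex pair.
      * FET PHYs use a different encoding, handled in the default case.
      */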
3082 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3083 {
3084         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3085         case MII_TG3_AUX_STAT_10HALF:
3086                 *speed = SPEED_10;
3087                 *duplex = DUPLEX_HALF;
3088                 break;
3089
3090         case MII_TG3_AUX_STAT_10FULL:
3091                 *speed = SPEED_10;
3092                 *duplex = DUPLEX_FULL;
3093                 break;
3094
3095         case MII_TG3_AUX_STAT_100HALF:
3096                 *speed = SPEED_100;
3097                 *duplex = DUPLEX_HALF;
3098                 break;
3099
3100         case MII_TG3_AUX_STAT_100FULL:
3101                 *speed = SPEED_100;
3102                 *duplex = DUPLEX_FULL;
3103                 break;
3104
3105         case MII_TG3_AUX_STAT_1000HALF:
3106                 *speed = SPEED_1000;
3107                 *duplex = DUPLEX_HALF;
3108                 break;
3109
3110         case MII_TG3_AUX_STAT_1000FULL:
3111                 *speed = SPEED_1000;
3112                 *duplex = DUPLEX_FULL;
3113                 break;
3114
3115         default:
3116                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3117                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3118                                  SPEED_10;
3119                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3120                                   DUPLEX_HALF;
3121                         break;
3122                 }
3123                 *speed = SPEED_INVALID;
3124                 *duplex = DUPLEX_INVALID;
3125                 break;
3126         }
3127 }
3128
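     /* Program the PHY autoneg advertisements: 10/100 modes and flow
      * control into MII_ADVERTISE, gigabit modes into MII_CTRL1000 and,
      * where the PHY supports it, the EEE abilities.
      */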
3129 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3130 {
3131         int err = 0;
3132         u32 val, new_adv;
3133
3134         new_adv = ADVERTISE_CSMA;
3135         if (advertise & ADVERTISED_10baseT_Half)
3136                 new_adv |= ADVERTISE_10HALF;
3137         if (advertise & ADVERTISED_10baseT_Full)
3138                 new_adv |= ADVERTISE_10FULL;
3139         if (advertise & ADVERTISED_100baseT_Half)
3140                 new_adv |= ADVERTISE_100HALF;
3141         if (advertise & ADVERTISED_100baseT_Full)
3142                 new_adv |= ADVERTISE_100FULL;
3143
3144         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3145
3146         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3147         if (err)
3148                 goto done;
3149
3150         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3151                 goto done;
3152
3153         new_adv = 0;
3154         if (advertise & ADVERTISED_1000baseT_Half)
3155                 new_adv |= ADVERTISE_1000HALF;
3156         if (advertise & ADVERTISED_1000baseT_Full)
3157                 new_adv |= ADVERTISE_1000FULL;
3158
3159         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3160             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3161                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3162
3163         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3164         if (err)
3165                 goto done;
3166
3167         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3168                 goto done;
3169
3170         tw32(TG3_CPMU_EEE_MODE,
3171              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3172
3173         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3174         if (!err) {
3175                 int err2;
3176
3177                 val = 0;
3178                 /* Advertise 100BASE-TX EEE ability */
3179                 if (advertise & ADVERTISED_100baseT_Full)
3180                         val |= MDIO_AN_EEE_ADV_100TX;
3181                 /* Advertise 1000BASE-T EEE ability */
3182                 if (advertise & ADVERTISED_1000baseT_Full)
3183                         val |= MDIO_AN_EEE_ADV_1000T;
3184                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3185                 if (err)
3186                         val = 0;
3187
3188                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3189                 case ASIC_REV_5717:
3190                 case ASIC_REV_57765:
3191                 case ASIC_REV_5719:
3192                         /* If we advertised any EEE abilities above... */
3193                         if (val)
3194                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3195                                       MII_TG3_DSP_TAP26_RMRXSTO |
3196                                       MII_TG3_DSP_TAP26_OPCSINPT;
3197                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3198                         /* Fall through */
3199                 case ASIC_REV_5720:
3200                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3201                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3202                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3203                 }
3204
3205                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3206                 if (!err)
3207                         err = err2;
3208         }
3209
3210 done:
3211         return err;
3212 }
3213
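     /* Begin copper link negotiation.  Low-power/WOL states advertise
      * only the slow speeds; otherwise either advertise the configured
      * modes or force the requested speed/duplex through BMCR.
      */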
3214 static void tg3_phy_copper_begin(struct tg3 *tp)
3215 {
3216         u32 new_adv;
3217         int i;
3218
3219         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3220                 new_adv = ADVERTISED_10baseT_Half |
3221                           ADVERTISED_10baseT_Full;
3222                 if (tg3_flag(tp, WOL_SPEED_100MB))
3223                         new_adv |= ADVERTISED_100baseT_Half |
3224                                    ADVERTISED_100baseT_Full;
3225
3226                 tg3_phy_autoneg_cfg(tp, new_adv,
3227                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3228         } else if (tp->link_config.speed == SPEED_INVALID) {
3229                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3230                         tp->link_config.advertising &=
3231                                 ~(ADVERTISED_1000baseT_Half |
3232                                   ADVERTISED_1000baseT_Full);
3233
3234                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3235                                     tp->link_config.flowctrl);
3236         } else {
3237                 /* Asking for a specific link mode. */
3238                 if (tp->link_config.speed == SPEED_1000) {
3239                         if (tp->link_config.duplex == DUPLEX_FULL)
3240                                 new_adv = ADVERTISED_1000baseT_Full;
3241                         else
3242                                 new_adv = ADVERTISED_1000baseT_Half;
3243                 } else if (tp->link_config.speed == SPEED_100) {
3244                         if (tp->link_config.duplex == DUPLEX_FULL)
3245                                 new_adv = ADVERTISED_100baseT_Full;
3246                         else
3247                                 new_adv = ADVERTISED_100baseT_Half;
3248                 } else {
3249                         if (tp->link_config.duplex == DUPLEX_FULL)
3250                                 new_adv = ADVERTISED_10baseT_Full;
3251                         else
3252                                 new_adv = ADVERTISED_10baseT_Half;
3253                 }
3254
3255                 tg3_phy_autoneg_cfg(tp, new_adv,
3256                                     tp->link_config.flowctrl);
3257         }
3258
3259         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3260             tp->link_config.speed != SPEED_INVALID) {
3261                 u32 bmcr, orig_bmcr;
3262
3263                 tp->link_config.active_speed = tp->link_config.speed;
3264                 tp->link_config.active_duplex = tp->link_config.duplex;
3265
3266                 bmcr = 0;
3267                 switch (tp->link_config.speed) {
3268                 default:
3269                 case SPEED_10:
3270                         break;
3271
3272                 case SPEED_100:
3273                         bmcr |= BMCR_SPEED100;
3274                         break;
3275
3276                 case SPEED_1000:
3277                         bmcr |= BMCR_SPEED1000;
3278                         break;
3279                 }
3280
3281                 if (tp->link_config.duplex == DUPLEX_FULL)
3282                         bmcr |= BMCR_FULLDPLX;
3283
3284                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3285                     (bmcr != orig_bmcr)) {
3286                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3287                         for (i = 0; i < 1500; i++) {
3288                                 u32 tmp;
3289
3290                                 udelay(10);
3291                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3292                                     tg3_readphy(tp, MII_BMSR, &tmp))
3293                                         continue;
3294                                 if (!(tmp & BMSR_LSTATUS)) {
3295                                         udelay(40);
3296                                         break;
3297                                 }
3298                         }
3299                         tg3_writephy(tp, MII_BMCR, bmcr);
3300                         udelay(40);
3301                 }
3302         } else {
3303                 tg3_writephy(tp, MII_BMCR,
3304                              BMCR_ANENABLE | BMCR_ANRESTART);
3305         }
3306 }
3307
3308 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3309 {
3310         int err;
3311
3312         /* Turn off tap power management. */
3313         /* Set Extended packet length bit */
3314         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3315
3316         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3317         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3318         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3319         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3320         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3321
3322         udelay(40);
3323
3324         return err;
3325 }
3326
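     /* Return 1 if the PHY currently advertises exactly the modes in
      * @mask, 0 otherwise or on a register read failure.
      */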
3327 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3328 {
3329         u32 adv_reg, all_mask = 0;
3330
3331         if (mask & ADVERTISED_10baseT_Half)
3332                 all_mask |= ADVERTISE_10HALF;
3333         if (mask & ADVERTISED_10baseT_Full)
3334                 all_mask |= ADVERTISE_10FULL;
3335         if (mask & ADVERTISED_100baseT_Half)
3336                 all_mask |= ADVERTISE_100HALF;
3337         if (mask & ADVERTISED_100baseT_Full)
3338                 all_mask |= ADVERTISE_100FULL;
3339
3340         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3341                 return 0;
3342
3343         if ((adv_reg & ADVERTISE_ALL) != all_mask)
3344                 return 0;
3345
3346         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3347                 u32 tg3_ctrl;
3348
3349                 all_mask = 0;
3350                 if (mask & ADVERTISED_1000baseT_Half)
3351                         all_mask |= ADVERTISE_1000HALF;
3352                 if (mask & ADVERTISED_1000baseT_Full)
3353                         all_mask |= ADVERTISE_1000FULL;
3354
3355                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3356                         return 0;
3357
3358                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3359                 if (tg3_ctrl != all_mask)
3360                         return 0;
3361         }
3362         return 1;
3363 }
3364
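     /* Verify that the advertised pause bits match the requested flow
      * control.  On a half-duplex link, a mismatch is repaired in the
      * advertisement register for the next negotiation instead of
      * failing the link.
      */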
3365 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3366 {
3367         u32 curadv, reqadv;
3368
3369         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3370                 return 1;
3371
3372         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3373         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3374
3375         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3376                 if (curadv != reqadv)
3377                         return 0;
3378
3379                 if (tg3_flag(tp, PAUSE_AUTONEG))
3380                         tg3_readphy(tp, MII_LPA, rmtadv);
3381         } else {
3382                 /* Reprogram the advertisement register, even if it
3383                  * does not affect the current link.  If the link
3384                  * gets renegotiated in the future, we can save an
3385                  * additional renegotiation cycle by advertising
3386                  * it correctly in the first place.
3387                  */
3388                 if (curadv != reqadv) {
3389                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3390                                      ADVERTISE_PAUSE_ASYM);
3391                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3392                 }
3393         }
3394
3395         return 1;
3396 }
3397
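     /* Bring the copper link into line with the current configuration:
      * poll BMSR for link, decode speed/duplex from the aux status
      * register, validate the autoneg result, and reprogram MAC_MODE
      * and the carrier state to match.
      */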
3398 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3399 {
3400         int current_link_up;
3401         u32 bmsr, val;
3402         u32 lcl_adv, rmt_adv;
3403         u16 current_speed;
3404         u8 current_duplex;
3405         int i, err;
3406
3407         tw32(MAC_EVENT, 0);
3408
3409         tw32_f(MAC_STATUS,
3410              (MAC_STATUS_SYNC_CHANGED |
3411               MAC_STATUS_CFG_CHANGED |
3412               MAC_STATUS_MI_COMPLETION |
3413               MAC_STATUS_LNKSTATE_CHANGED));
3414         udelay(40);
3415
3416         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3417                 tw32_f(MAC_MI_MODE,
3418                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3419                 udelay(80);
3420         }
3421
3422         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3423
3424         /* Some third-party PHYs need to be reset on link going
3425          * down.
3426          */
3427         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3428              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3429              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3430             netif_carrier_ok(tp->dev)) {
3431                 tg3_readphy(tp, MII_BMSR, &bmsr);
3432                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3433                     !(bmsr & BMSR_LSTATUS))
3434                         force_reset = 1;
3435         }
3436         if (force_reset)
3437                 tg3_phy_reset(tp);
3438
3439         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3440                 tg3_readphy(tp, MII_BMSR, &bmsr);
3441                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3442                     !tg3_flag(tp, INIT_COMPLETE))
3443                         bmsr = 0;
3444
3445                 if (!(bmsr & BMSR_LSTATUS)) {
3446                         err = tg3_init_5401phy_dsp(tp);
3447                         if (err)
3448                                 return err;
3449
3450                         tg3_readphy(tp, MII_BMSR, &bmsr);
3451                         for (i = 0; i < 1000; i++) {
3452                                 udelay(10);
3453                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3454                                     (bmsr & BMSR_LSTATUS)) {
3455                                         udelay(40);
3456                                         break;
3457                                 }
3458                         }
3459
3460                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3461                             TG3_PHY_REV_BCM5401_B0 &&
3462                             !(bmsr & BMSR_LSTATUS) &&
3463                             tp->link_config.active_speed == SPEED_1000) {
3464                                 err = tg3_phy_reset(tp);
3465                                 if (!err)
3466                                         err = tg3_init_5401phy_dsp(tp);
3467                                 if (err)
3468                                         return err;
3469                         }
3470                 }
3471         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3472                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3473                 /* 5701 {A0,B0} CRC bug workaround */
3474                 tg3_writephy(tp, 0x15, 0x0a75);
3475                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3476                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3477                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3478         }
3479
3480         /* Clear pending interrupts... */
3481         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3482         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3483
3484         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3485                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3486         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3487                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3488
3489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3490             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3491                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3492                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3493                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3494                 else
3495                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3496         }
3497
3498         current_link_up = 0;
3499         current_speed = SPEED_INVALID;
3500         current_duplex = DUPLEX_INVALID;
3501
3502         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3503                 err = tg3_phy_auxctl_read(tp,
3504                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3505                                           &val);
3506                 if (!err && !(val & (1 << 10))) {
3507                         tg3_phy_auxctl_write(tp,
3508                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3509                                              val | (1 << 10));
3510                         goto relink;
3511                 }
3512         }
3513
3514         bmsr = 0;
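             /* BMSR latches link-down events; read it twice so the
              * second read reflects the current link state.
              */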
3515         for (i = 0; i < 100; i++) {
3516                 tg3_readphy(tp, MII_BMSR, &bmsr);
3517                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3518                     (bmsr & BMSR_LSTATUS))
3519                         break;
3520                 udelay(40);
3521         }
3522
3523         if (bmsr & BMSR_LSTATUS) {
3524                 u32 aux_stat, bmcr;
3525
3526                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3527                 for (i = 0; i < 2000; i++) {
3528                         udelay(10);
3529                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3530                             aux_stat)
3531                                 break;
3532                 }
3533
3534                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3535                                              &current_speed,
3536                                              &current_duplex);
3537
3538                 bmcr = 0;
3539                 for (i = 0; i < 200; i++) {
3540                         tg3_readphy(tp, MII_BMCR, &bmcr);
3541                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3542                                 continue;
3543                         if (bmcr && bmcr != 0x7fff)
3544                                 break;
3545                         udelay(10);
3546                 }
3547
3548                 lcl_adv = 0;
3549                 rmt_adv = 0;
3550
3551                 tp->link_config.active_speed = current_speed;
3552                 tp->link_config.active_duplex = current_duplex;
3553
3554                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3555                         if ((bmcr & BMCR_ANENABLE) &&
3556                             tg3_copper_is_advertising_all(tp,
3557                                                 tp->link_config.advertising)) {
3558                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3559                                                                   &rmt_adv))
3560                                         current_link_up = 1;
3561                         }
3562                 } else {
3563                         if (!(bmcr & BMCR_ANENABLE) &&
3564                             tp->link_config.speed == current_speed &&
3565                             tp->link_config.duplex == current_duplex &&
3566                             tp->link_config.flowctrl ==
3567                             tp->link_config.active_flowctrl) {
3568                                 current_link_up = 1;
3569                         }
3570                 }
3571
3572                 if (current_link_up == 1 &&
3573                     tp->link_config.active_duplex == DUPLEX_FULL)
3574                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3575         }
3576
3577 relink:
3578         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3579                 tg3_phy_copper_begin(tp);
3580
3581                 tg3_readphy(tp, MII_BMSR, &bmsr);
3582                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3583                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3584                         current_link_up = 1;
3585         }
3586
3587         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3588         if (current_link_up == 1) {
3589                 if (tp->link_config.active_speed == SPEED_100 ||
3590                     tp->link_config.active_speed == SPEED_10)
3591                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3592                 else
3593                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3594         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3595                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3596         else
3597                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3598
3599         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3600         if (tp->link_config.active_duplex == DUPLEX_HALF)
3601                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3602
3603         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3604                 if (current_link_up == 1 &&
3605                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3606                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3607                 else
3608                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3609         }
3610
3611         /* ??? Without this setting Netgear GA302T PHY does not
3612          * ??? send/receive packets...
3613          */
3614         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3615             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3616                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3617                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3618                 udelay(80);
3619         }
3620
3621         tw32_f(MAC_MODE, tp->mac_mode);
3622         udelay(40);
3623
3624         tg3_phy_eee_adjust(tp, current_link_up);
3625
3626         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3627                 /* Polled via timer. */
3628                 tw32_f(MAC_EVENT, 0);
3629         } else {
3630                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3631         }
3632         udelay(40);
3633
3634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3635             current_link_up == 1 &&
3636             tp->link_config.active_speed == SPEED_1000 &&
3637             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3638                 udelay(120);
3639                 tw32_f(MAC_STATUS,
3640                      (MAC_STATUS_SYNC_CHANGED |
3641                       MAC_STATUS_CFG_CHANGED));
3642                 udelay(40);
3643                 tg3_write_mem(tp,
3644                               NIC_SRAM_FIRMWARE_MBOX,
3645                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3646         }
3647
3648         /* Prevent send BD corruption: keep CLKREQ off at 10/100 speeds. */
3649         if (tg3_flag(tp, CLKREQ_BUG)) {
3650                 u16 oldlnkctl, newlnkctl;
3651
3652                 pci_read_config_word(tp->pdev,
3653                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3654                                      &oldlnkctl);
3655                 if (tp->link_config.active_speed == SPEED_100 ||
3656                     tp->link_config.active_speed == SPEED_10)
3657                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3658                 else
3659                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3660                 if (newlnkctl != oldlnkctl)
3661                         pci_write_config_word(tp->pdev,
3662                                               pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3663                                               newlnkctl);
3664         }
3665
3666         if (current_link_up != netif_carrier_ok(tp->dev)) {
3667                 if (current_link_up)
3668                         netif_carrier_on(tp->dev);
3669                 else
3670                         netif_carrier_off(tp->dev);
3671                 tg3_link_report(tp);
3672         }
3673
3674         return 0;
3675 }
3676
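     /* Software auto-negotiation state machine for 1000BASE-X fiber
      * links, modeled on the IEEE 802.3 Clause 37 arbitration states.
      */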
3677 struct tg3_fiber_aneginfo {
3678         int state;
3679 #define ANEG_STATE_UNKNOWN              0
3680 #define ANEG_STATE_AN_ENABLE            1
3681 #define ANEG_STATE_RESTART_INIT         2
3682 #define ANEG_STATE_RESTART              3
3683 #define ANEG_STATE_DISABLE_LINK_OK      4
3684 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3685 #define ANEG_STATE_ABILITY_DETECT       6
3686 #define ANEG_STATE_ACK_DETECT_INIT      7
3687 #define ANEG_STATE_ACK_DETECT           8
3688 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3689 #define ANEG_STATE_COMPLETE_ACK         10
3690 #define ANEG_STATE_IDLE_DETECT_INIT     11
3691 #define ANEG_STATE_IDLE_DETECT          12
3692 #define ANEG_STATE_LINK_OK              13
3693 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3694 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3695
3696         u32 flags;
3697 #define MR_AN_ENABLE            0x00000001
3698 #define MR_RESTART_AN           0x00000002
3699 #define MR_AN_COMPLETE          0x00000004
3700 #define MR_PAGE_RX              0x00000008
3701 #define MR_NP_LOADED            0x00000010
3702 #define MR_TOGGLE_TX            0x00000020
3703 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3704 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3705 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3706 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3707 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3708 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3709 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3710 #define MR_TOGGLE_RX            0x00002000
3711 #define MR_NP_RX                0x00004000
3712
3713 #define MR_LINK_OK              0x80000000
3714
3715         unsigned long link_time, cur_time;
3716
3717         u32 ability_match_cfg;
3718         int ability_match_count;
3719
3720         char ability_match, idle_match, ack_match;
3721
3722         u32 txconfig, rxconfig;
3723 #define ANEG_CFG_NP             0x00000080
3724 #define ANEG_CFG_ACK            0x00000040
3725 #define ANEG_CFG_RF2            0x00000020
3726 #define ANEG_CFG_RF1            0x00000010
3727 #define ANEG_CFG_PS2            0x00000001
3728 #define ANEG_CFG_PS1            0x00008000
3729 #define ANEG_CFG_HD             0x00004000
3730 #define ANEG_CFG_FD             0x00002000
3731 #define ANEG_CFG_INVAL          0x00001f06
3732
3733 };
3734 #define ANEG_OK         0
3735 #define ANEG_DONE       1
3736 #define ANEG_TIMER_ENAB 2
3737 #define ANEG_FAILED     -1
3738
3739 #define ANEG_STATE_SETTLE_TIME  10000
3740
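     /* Advance the fiber autoneg state machine by one tick.  Returns
      * ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB, or ANEG_FAILED.
      */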
3741 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3742                                    struct tg3_fiber_aneginfo *ap)
3743 {
3744         u16 flowctrl;
3745         unsigned long delta;
3746         u32 rx_cfg_reg;
3747         int ret;
3748
3749         if (ap->state == ANEG_STATE_UNKNOWN) {
3750                 ap->rxconfig = 0;
3751                 ap->link_time = 0;
3752                 ap->cur_time = 0;
3753                 ap->ability_match_cfg = 0;
3754                 ap->ability_match_count = 0;
3755                 ap->ability_match = 0;
3756                 ap->idle_match = 0;
3757                 ap->ack_match = 0;
3758         }
3759         ap->cur_time++;
3760
3761         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3762                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3763
3764                 if (rx_cfg_reg != ap->ability_match_cfg) {
3765                         ap->ability_match_cfg = rx_cfg_reg;
3766                         ap->ability_match = 0;
3767                         ap->ability_match_count = 0;
3768                 } else {
3769                         if (++ap->ability_match_count > 1) {
3770                                 ap->ability_match = 1;
3771                                 ap->ability_match_cfg = rx_cfg_reg;
3772                         }
3773                 }
3774                 if (rx_cfg_reg & ANEG_CFG_ACK)
3775                         ap->ack_match = 1;
3776                 else
3777                         ap->ack_match = 0;
3778
3779                 ap->idle_match = 0;
3780         } else {
3781                 ap->idle_match = 1;
3782                 ap->ability_match_cfg = 0;
3783                 ap->ability_match_count = 0;
3784                 ap->ability_match = 0;
3785                 ap->ack_match = 0;
3786
3787                 rx_cfg_reg = 0;
3788         }
3789
3790         ap->rxconfig = rx_cfg_reg;
3791         ret = ANEG_OK;
3792
3793         switch (ap->state) {
3794         case ANEG_STATE_UNKNOWN:
3795                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3796                         ap->state = ANEG_STATE_AN_ENABLE;
3797
3798                 /* Fall through */
3799         case ANEG_STATE_AN_ENABLE:
3800                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3801                 if (ap->flags & MR_AN_ENABLE) {
3802                         ap->link_time = 0;
3803                         ap->cur_time = 0;
3804                         ap->ability_match_cfg = 0;
3805                         ap->ability_match_count = 0;
3806                         ap->ability_match = 0;
3807                         ap->idle_match = 0;
3808                         ap->ack_match = 0;
3809
3810                         ap->state = ANEG_STATE_RESTART_INIT;
3811                 } else {
3812                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3813                 }
3814                 break;
3815
3816         case ANEG_STATE_RESTART_INIT:
3817                 ap->link_time = ap->cur_time;
3818                 ap->flags &= ~(MR_NP_LOADED);
3819                 ap->txconfig = 0;
3820                 tw32(MAC_TX_AUTO_NEG, 0);
3821                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3822                 tw32_f(MAC_MODE, tp->mac_mode);
3823                 udelay(40);
3824
3825                 ret = ANEG_TIMER_ENAB;
3826                 ap->state = ANEG_STATE_RESTART;
3827
3828                 /* Fall through */
3829         case ANEG_STATE_RESTART:
3830                 delta = ap->cur_time - ap->link_time;
3831                 if (delta > ANEG_STATE_SETTLE_TIME)
3832                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3833                 else
3834                         ret = ANEG_TIMER_ENAB;
3835                 break;
3836
3837         case ANEG_STATE_DISABLE_LINK_OK:
3838                 ret = ANEG_DONE;
3839                 break;
3840
3841         case ANEG_STATE_ABILITY_DETECT_INIT:
3842                 ap->flags &= ~(MR_TOGGLE_TX);
3843                 ap->txconfig = ANEG_CFG_FD;
3844                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3845                 if (flowctrl & ADVERTISE_1000XPAUSE)
3846                         ap->txconfig |= ANEG_CFG_PS1;
3847                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3848                         ap->txconfig |= ANEG_CFG_PS2;
3849                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3850                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3851                 tw32_f(MAC_MODE, tp->mac_mode);
3852                 udelay(40);
3853
3854                 ap->state = ANEG_STATE_ABILITY_DETECT;
3855                 break;
3856
3857         case ANEG_STATE_ABILITY_DETECT:
3858                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3859                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3860                 break;
3861
3862         case ANEG_STATE_ACK_DETECT_INIT:
3863                 ap->txconfig |= ANEG_CFG_ACK;
3864                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3865                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3866                 tw32_f(MAC_MODE, tp->mac_mode);
3867                 udelay(40);
3868
3869                 ap->state = ANEG_STATE_ACK_DETECT;
3870
3871                 /* Fall through */
3872         case ANEG_STATE_ACK_DETECT:
3873                 if (ap->ack_match != 0) {
3874                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3875                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3876                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3877                         } else {
3878                                 ap->state = ANEG_STATE_AN_ENABLE;
3879                         }
3880                 } else if (ap->ability_match != 0 &&
3881                            ap->rxconfig == 0) {
3882                         ap->state = ANEG_STATE_AN_ENABLE;
3883                 }
3884                 break;
3885
3886         case ANEG_STATE_COMPLETE_ACK_INIT:
3887                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3888                         ret = ANEG_FAILED;
3889                         break;
3890                 }
3891                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3892                                MR_LP_ADV_HALF_DUPLEX |
3893                                MR_LP_ADV_SYM_PAUSE |
3894                                MR_LP_ADV_ASYM_PAUSE |
3895                                MR_LP_ADV_REMOTE_FAULT1 |
3896                                MR_LP_ADV_REMOTE_FAULT2 |
3897                                MR_LP_ADV_NEXT_PAGE |
3898                                MR_TOGGLE_RX |
3899                                MR_NP_RX);
3900                 if (ap->rxconfig & ANEG_CFG_FD)
3901                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3902                 if (ap->rxconfig & ANEG_CFG_HD)
3903                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3904                 if (ap->rxconfig & ANEG_CFG_PS1)
3905                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3906                 if (ap->rxconfig & ANEG_CFG_PS2)
3907                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3908                 if (ap->rxconfig & ANEG_CFG_RF1)
3909                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3910                 if (ap->rxconfig & ANEG_CFG_RF2)
3911                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3912                 if (ap->rxconfig & ANEG_CFG_NP)
3913                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3914
3915                 ap->link_time = ap->cur_time;
3916
3917                 ap->flags ^= (MR_TOGGLE_TX);
3918                 if (ap->rxconfig & 0x0008)
3919                         ap->flags |= MR_TOGGLE_RX;
3920                 if (ap->rxconfig & ANEG_CFG_NP)
3921                         ap->flags |= MR_NP_RX;
3922                 ap->flags |= MR_PAGE_RX;
3923
3924                 ap->state = ANEG_STATE_COMPLETE_ACK;
3925                 ret = ANEG_TIMER_ENAB;
3926                 break;
3927
3928         case ANEG_STATE_COMPLETE_ACK:
3929                 if (ap->ability_match != 0 &&
3930                     ap->rxconfig == 0) {
3931                         ap->state = ANEG_STATE_AN_ENABLE;
3932                         break;
3933                 }
3934                 delta = ap->cur_time - ap->link_time;
3935                 if (delta > ANEG_STATE_SETTLE_TIME) {
3936                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3937                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3938                         } else {
3939                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3940                                     !(ap->flags & MR_NP_RX)) {
3941                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3942                                 } else {
3943                                         ret = ANEG_FAILED;
3944                                 }
3945                         }
3946                 }
3947                 break;
3948
3949         case ANEG_STATE_IDLE_DETECT_INIT:
3950                 ap->link_time = ap->cur_time;
3951                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3952                 tw32_f(MAC_MODE, tp->mac_mode);
3953                 udelay(40);
3954
3955                 ap->state = ANEG_STATE_IDLE_DETECT;
3956                 ret = ANEG_TIMER_ENAB;
3957                 break;
3958
3959         case ANEG_STATE_IDLE_DETECT:
3960                 if (ap->ability_match != 0 &&
3961                     ap->rxconfig == 0) {
3962                         ap->state = ANEG_STATE_AN_ENABLE;
3963                         break;
3964                 }
3965                 delta = ap->cur_time - ap->link_time;
3966                 if (delta > ANEG_STATE_SETTLE_TIME) {
3967                         /* XXX another gem from the Broadcom driver :( */
3968                         ap->state = ANEG_STATE_LINK_OK;
3969                 }
3970                 break;
3971
3972         case ANEG_STATE_LINK_OK:
3973                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3974                 ret = ANEG_DONE;
3975                 break;
3976
3977         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3978                 /* ??? unimplemented */
3979                 break;
3980
3981         case ANEG_STATE_NEXT_PAGE_WAIT:
3982                 /* ??? unimplemented */
3983                 break;
3984
3985         default:
3986                 ret = ANEG_FAILED;
3987                 break;
3988         }
3989
3990         return ret;
3991 }
3992
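     /* Drive the software autoneg state machine to completion, bounded
      * at roughly 195 ms.  Returns 1 if negotiation finished with a
      * usable result.
      */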
3993 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3994 {
3995         int res = 0;
3996         struct tg3_fiber_aneginfo aninfo;
3997         int status = ANEG_FAILED;
3998         unsigned int tick;
3999         u32 tmp;
4000
4001         tw32_f(MAC_TX_AUTO_NEG, 0);
4002
4003         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4004         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4005         udelay(40);
4006
4007         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4008         udelay(40);
4009
4010         memset(&aninfo, 0, sizeof(aninfo));
4011         aninfo.flags |= MR_AN_ENABLE;
4012         aninfo.state = ANEG_STATE_UNKNOWN;
4013         aninfo.cur_time = 0;
4014         tick = 0;
4015         while (++tick < 195000) {
4016                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4017                 if (status == ANEG_DONE || status == ANEG_FAILED)
4018                         break;
4019
4020                 udelay(1);
4021         }
4022
4023         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4024         tw32_f(MAC_MODE, tp->mac_mode);
4025         udelay(40);
4026
4027         *txflags = aninfo.txconfig;
4028         *rxflags = aninfo.flags;
4029
4030         if (status == ANEG_DONE &&
4031             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4032                              MR_LP_ADV_FULL_DUPLEX)))
4033                 res = 1;
4034
4035         return res;
4036 }
4037
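     /* Bring-up sequence for the BCM8002 fiber PHY using its
      * vendor-specific registers; the values are Broadcom magic.
      */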
4038 static void tg3_init_bcm8002(struct tg3 *tp)
4039 {
4040         u32 mac_status = tr32(MAC_STATUS);
4041         int i;
4042
4043         /* Reset when initializing for the first time or when we have a link. */
4044         if (tg3_flag(tp, INIT_COMPLETE) &&
4045             !(mac_status & MAC_STATUS_PCS_SYNCED))
4046                 return;
4047
4048         /* Set PLL lock range. */
4049         tg3_writephy(tp, 0x16, 0x8007);
4050
4051         /* SW reset */
4052         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4053
4054         /* Wait for reset to complete. */
4055         /* XXX schedule_timeout() ... */
4056         for (i = 0; i < 500; i++)
4057                 udelay(10);
4058
4059         /* Config mode; select PMA/Ch 1 regs. */
4060         tg3_writephy(tp, 0x10, 0x8411);
4061
4062         /* Enable auto-lock and comdet, select txclk for tx. */
4063         tg3_writephy(tp, 0x11, 0x0a10);
4064
4065         tg3_writephy(tp, 0x18, 0x00a0);
4066         tg3_writephy(tp, 0x16, 0x41ff);
4067
4068         /* Assert and deassert POR. */
4069         tg3_writephy(tp, 0x13, 0x0400);
4070         udelay(40);
4071         tg3_writephy(tp, 0x13, 0x0000);
4072
4073         tg3_writephy(tp, 0x11, 0x0a50);
4074         udelay(40);
4075         tg3_writephy(tp, 0x11, 0x0a10);
4076
4077         /* Wait for signal to stabilize */
4078         /* XXX schedule_timeout() ... */
4079         for (i = 0; i < 15000; i++)
4080                 udelay(10);
4081
4082         /* Deselect the channel register so we can read the PHYID
4083          * later.
4084          */
4085         tg3_writephy(tp, 0x10, 0x8011);
4086 }
4087
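     /* Fiber link setup using the hardware SG DIG autoneg block.
      * Returns nonzero when the link is up.  MAC_SERDES_CFG is
      * reprogrammed as a workaround on every revision except 5704 A0/A1.
      */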
4088 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4089 {
4090         u16 flowctrl;
4091         u32 sg_dig_ctrl, sg_dig_status;
4092         u32 serdes_cfg, expected_sg_dig_ctrl;
4093         int workaround, port_a;
4094         int current_link_up;
4095
4096         serdes_cfg = 0;
4097         expected_sg_dig_ctrl = 0;
4098         workaround = 0;
4099         port_a = 1;
4100         current_link_up = 0;
4101
4102         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4103             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4104                 workaround = 1;
4105                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4106                         port_a = 0;
4107
4108                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4109                 /* preserve bits 20-23 for voltage regulator */
4110                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4111         }
4112
4113         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4114
4115         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4116                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4117                         if (workaround) {
4118                                 u32 val = serdes_cfg;
4119
4120                                 if (port_a)
4121                                         val |= 0xc010000;
4122                                 else
4123                                         val |= 0x4010000;
4124                                 tw32_f(MAC_SERDES_CFG, val);
4125                         }
4126
4127                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4128                 }
4129                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4130                         tg3_setup_flow_control(tp, 0, 0);
4131                         current_link_up = 1;
4132                 }
4133                 goto out;
4134         }
4135
4136         /* Want auto-negotiation.  */
4137         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4138
4139         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4140         if (flowctrl & ADVERTISE_1000XPAUSE)
4141                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4142         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4143                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4144
4145         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4146                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4147                     tp->serdes_counter &&
4148                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4149                                     MAC_STATUS_RCVD_CFG)) ==
4150                      MAC_STATUS_PCS_SYNCED)) {
4151                         tp->serdes_counter--;
4152                         current_link_up = 1;
4153                         goto out;
4154                 }
4155 restart_autoneg:
4156                 if (workaround)
4157                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4158                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4159                 udelay(5);
4160                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4161
4162                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4163                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4164         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4165                                  MAC_STATUS_SIGNAL_DET)) {
4166                 sg_dig_status = tr32(SG_DIG_STATUS);
4167                 mac_status = tr32(MAC_STATUS);
4168
4169                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4170                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4171                         u32 local_adv = 0, remote_adv = 0;
4172
4173                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4174                                 local_adv |= ADVERTISE_1000XPAUSE;
4175                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4176                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4177
4178                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4179                                 remote_adv |= LPA_1000XPAUSE;
4180                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4181                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4182
4183                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4184                         current_link_up = 1;
4185                         tp->serdes_counter = 0;
4186                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4187                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4188                         if (tp->serdes_counter)
4189                                 tp->serdes_counter--;
4190                         else {
4191                                 if (workaround) {
4192                                         u32 val = serdes_cfg;
4193
4194                                         if (port_a)
4195                                                 val |= 0xc010000;
4196                                         else
4197                                                 val |= 0x4010000;
4198
4199                                         tw32_f(MAC_SERDES_CFG, val);
4200                                 }
4201
4202                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4203                                 udelay(40);
4204
4205                                 /* Link parallel detection: link is up
4206                                  * only if we have PCS_SYNC and are not
4207                                  * receiving config code words. */
4208                                 mac_status = tr32(MAC_STATUS);
4209                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4210                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4211                                         tg3_setup_flow_control(tp, 0, 0);
4212                                         current_link_up = 1;
4213                                         tp->phy_flags |=
4214                                                 TG3_PHYFLG_PARALLEL_DETECT;
4215                                         tp->serdes_counter =
4216                                                 SERDES_PARALLEL_DET_TIMEOUT;
4217                                 } else
4218                                         goto restart_autoneg;
4219                         }
4220                 }
4221         } else {
4222                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4223                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4224         }
4225
4226 out:
4227         return current_link_up;
4228 }
4229
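     /* Fiber link setup without the SG DIG block: run autoneg in
      * software via fiber_autoneg(), or force a 1000FD link when
      * autoneg is disabled.  Returns nonzero when the link is up.
      */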
4230 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4231 {
4232         int current_link_up = 0;
4233
4234         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4235                 goto out;
4236
4237         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4238                 u32 txflags, rxflags;
4239                 int i;
4240
4241                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4242                         u32 local_adv = 0, remote_adv = 0;
4243
4244                         if (txflags & ANEG_CFG_PS1)
4245                                 local_adv |= ADVERTISE_1000XPAUSE;
4246                         if (txflags & ANEG_CFG_PS2)
4247                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4248
4249                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4250                                 remote_adv |= LPA_1000XPAUSE;
4251                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4252                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4253
4254                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4255
4256                         current_link_up = 1;
4257                 }
4258                 for (i = 0; i < 30; i++) {
4259                         udelay(20);
4260                         tw32_f(MAC_STATUS,
4261                                (MAC_STATUS_SYNC_CHANGED |
4262                                 MAC_STATUS_CFG_CHANGED));
4263                         udelay(40);
4264                         if ((tr32(MAC_STATUS) &
4265                              (MAC_STATUS_SYNC_CHANGED |
4266                               MAC_STATUS_CFG_CHANGED)) == 0)
4267                                 break;
4268                 }
4269
4270                 mac_status = tr32(MAC_STATUS);
4271                 if (current_link_up == 0 &&
4272                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4273                     !(mac_status & MAC_STATUS_RCVD_CFG))
4274                         current_link_up = 1;
4275         } else {
4276                 tg3_setup_flow_control(tp, 0, 0);
4277
4278                 /* Forcing 1000FD link up. */
4279                 current_link_up = 1;
4280
4281                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4282                 udelay(40);
4283
4284                 tw32_f(MAC_MODE, tp->mac_mode);
4285                 udelay(40);
4286         }
4287
4288 out:
4289         return current_link_up;
4290 }
4291
4292 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4293 {
4294         u32 orig_pause_cfg;
4295         u16 orig_active_speed;
4296         u8 orig_active_duplex;
4297         u32 mac_status;
4298         int current_link_up;
4299         int i;
4300
4301         orig_pause_cfg = tp->link_config.active_flowctrl;
4302         orig_active_speed = tp->link_config.active_speed;
4303         orig_active_duplex = tp->link_config.active_duplex;
4304
4305         if (!tg3_flag(tp, HW_AUTONEG) &&
4306             netif_carrier_ok(tp->dev) &&
4307             tg3_flag(tp, INIT_COMPLETE)) {
4308                 mac_status = tr32(MAC_STATUS);
4309                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4310                                MAC_STATUS_SIGNAL_DET |
4311                                MAC_STATUS_CFG_CHANGED |
4312                                MAC_STATUS_RCVD_CFG);
4313                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4314                                    MAC_STATUS_SIGNAL_DET)) {
4315                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4316                                             MAC_STATUS_CFG_CHANGED));
4317                         return 0;
4318                 }
4319         }
4320
4321         tw32_f(MAC_TX_AUTO_NEG, 0);
4322
4323         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4324         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4325         tw32_f(MAC_MODE, tp->mac_mode);
4326         udelay(40);
4327
4328         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4329                 tg3_init_bcm8002(tp);
4330
4331         /* Enable link change events even when serdes polling is in use.  */
4332         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4333         udelay(40);
4334
4335         current_link_up = 0;
4336         mac_status = tr32(MAC_STATUS);
4337
4338         if (tg3_flag(tp, HW_AUTONEG))
4339                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4340         else
4341                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4342
4343         tp->napi[0].hw_status->status =
4344                 (SD_STATUS_UPDATED |
4345                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4346
4347         for (i = 0; i < 100; i++) {
4348                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4349                                     MAC_STATUS_CFG_CHANGED));
4350                 udelay(5);
4351                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4352                                          MAC_STATUS_CFG_CHANGED |
4353                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4354                         break;
4355         }
4356
4357         mac_status = tr32(MAC_STATUS);
4358         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4359                 current_link_up = 0;
4360                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4361                     tp->serdes_counter == 0) {
4362                         tw32_f(MAC_MODE, (tp->mac_mode |
4363                                           MAC_MODE_SEND_CONFIGS));
4364                         udelay(1);
4365                         tw32_f(MAC_MODE, tp->mac_mode);
4366                 }
4367         }
4368
4369         if (current_link_up == 1) {
4370                 tp->link_config.active_speed = SPEED_1000;
4371                 tp->link_config.active_duplex = DUPLEX_FULL;
4372                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4373                                     LED_CTRL_LNKLED_OVERRIDE |
4374                                     LED_CTRL_1000MBPS_ON));
4375         } else {
4376                 tp->link_config.active_speed = SPEED_INVALID;
4377                 tp->link_config.active_duplex = DUPLEX_INVALID;
4378                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4379                                     LED_CTRL_LNKLED_OVERRIDE |
4380                                     LED_CTRL_TRAFFIC_OVERRIDE));
4381         }
4382
4383         if (current_link_up != netif_carrier_ok(tp->dev)) {
4384                 if (current_link_up)
4385                         netif_carrier_on(tp->dev);
4386                 else
4387                         netif_carrier_off(tp->dev);
4388                 tg3_link_report(tp);
4389         } else {
4390                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4391                 if (orig_pause_cfg != now_pause_cfg ||
4392                     orig_active_speed != tp->link_config.active_speed ||
4393                     orig_active_duplex != tp->link_config.active_duplex)
4394                         tg3_link_report(tp);
4395         }
4396
4397         return 0;
4398 }
4399
4400 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4401 {
4402         int current_link_up, err = 0;
4403         u32 bmsr, bmcr;
4404         u16 current_speed;
4405         u8 current_duplex;
4406         u32 local_adv, remote_adv;
4407
4408         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4409         tw32_f(MAC_MODE, tp->mac_mode);
4410         udelay(40);
4411
4412         tw32(MAC_EVENT, 0);
4413
4414         tw32_f(MAC_STATUS,
4415              (MAC_STATUS_SYNC_CHANGED |
4416               MAC_STATUS_CFG_CHANGED |
4417               MAC_STATUS_MI_COMPLETION |
4418               MAC_STATUS_LNKSTATE_CHANGED));
4419         udelay(40);
4420
4421         if (force_reset)
4422                 tg3_phy_reset(tp);
4423
4424         current_link_up = 0;
4425         current_speed = SPEED_INVALID;
4426         current_duplex = DUPLEX_INVALID;
4427
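        /* BMSR latches link-down events, so read it twice; the second
         * read reflects the current link state (standard MII idiom).
         */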
4428         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4429         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4430         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4431                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4432                         bmsr |= BMSR_LSTATUS;
4433                 else
4434                         bmsr &= ~BMSR_LSTATUS;
4435         }
4436
4437         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4438
4439         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4440             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4441                 /* do nothing, just check for link up at the end */
4442         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4443                 u32 adv, new_adv;
4444
4445                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4446                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4447                                   ADVERTISE_1000XPAUSE |
4448                                   ADVERTISE_1000XPSE_ASYM |
4449                                   ADVERTISE_SLCT);
4450
4451                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4452
4453                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4454                         new_adv |= ADVERTISE_1000XHALF;
4455                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4456                         new_adv |= ADVERTISE_1000XFULL;
4457
4458                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4459                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4460                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4461                         tg3_writephy(tp, MII_BMCR, bmcr);
4462
4463                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4464                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4465                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4466
4467                         return err;
4468                 }
4469         } else {
4470                 u32 new_bmcr;
4471
4472                 bmcr &= ~BMCR_SPEED1000;
4473                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4474
4475                 if (tp->link_config.duplex == DUPLEX_FULL)
4476                         new_bmcr |= BMCR_FULLDPLX;
4477
4478                 if (new_bmcr != bmcr) {
4479                         /* BMCR_SPEED1000 is a reserved bit that needs
4480                          * to be set on write.
4481                          */
4482                         new_bmcr |= BMCR_SPEED1000;
4483
4484                         /* Force a linkdown */
4485                         if (netif_carrier_ok(tp->dev)) {
4486                                 u32 adv;
4487
4488                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4489                                 adv &= ~(ADVERTISE_1000XFULL |
4490                                          ADVERTISE_1000XHALF |
4491                                          ADVERTISE_SLCT);
4492                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4493                                 tg3_writephy(tp, MII_BMCR, bmcr |
4494                                                            BMCR_ANRESTART |
4495                                                            BMCR_ANENABLE);
4496                                 udelay(10);
4497                                 netif_carrier_off(tp->dev);
4498                         }
4499                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4500                         bmcr = new_bmcr;
4501                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4502                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4503                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4504                             ASIC_REV_5714) {
4505                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4506                                         bmsr |= BMSR_LSTATUS;
4507                                 else
4508                                         bmsr &= ~BMSR_LSTATUS;
4509                         }
4510                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4511                 }
4512         }
4513
4514         if (bmsr & BMSR_LSTATUS) {
4515                 current_speed = SPEED_1000;
4516                 current_link_up = 1;
4517                 if (bmcr & BMCR_FULLDPLX)
4518                         current_duplex = DUPLEX_FULL;
4519                 else
4520                         current_duplex = DUPLEX_HALF;
4521
4522                 local_adv = 0;
4523                 remote_adv = 0;
4524
4525                 if (bmcr & BMCR_ANENABLE) {
4526                         u32 common;
4527
4528                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4529                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4530                         common = local_adv & remote_adv;
4531                         if (common & (ADVERTISE_1000XHALF |
4532                                       ADVERTISE_1000XFULL)) {
4533                                 if (common & ADVERTISE_1000XFULL)
4534                                         current_duplex = DUPLEX_FULL;
4535                                 else
4536                                         current_duplex = DUPLEX_HALF;
4537                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4538                                 /* Link is up via parallel detect */
4539                         } else {
4540                                 current_link_up = 0;
4541                         }
4542                 }
4543         }
4544
4545         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4546                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4547
4548         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4549         if (tp->link_config.active_duplex == DUPLEX_HALF)
4550                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4551
4552         tw32_f(MAC_MODE, tp->mac_mode);
4553         udelay(40);
4554
4555         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4556
4557         tp->link_config.active_speed = current_speed;
4558         tp->link_config.active_duplex = current_duplex;
4559
4560         if (current_link_up != netif_carrier_ok(tp->dev)) {
4561                 if (current_link_up)
4562                         netif_carrier_on(tp->dev);
4563                 else {
4564                         netif_carrier_off(tp->dev);
4565                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4566                 }
4567                 tg3_link_report(tp);
4568         }
4569         return err;
4570 }
4571
4572 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4573 {
4574         if (tp->serdes_counter) {
4575                 /* Give autoneg time to complete. */
4576                 tp->serdes_counter--;
4577                 return;
4578         }
4579
4580         if (!netif_carrier_ok(tp->dev) &&
4581             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4582                 u32 bmcr;
4583
4584                 tg3_readphy(tp, MII_BMCR, &bmcr);
4585                 if (bmcr & BMCR_ANENABLE) {
4586                         u32 phy1, phy2;
4587
4588                         /* Select shadow register 0x1f */
4589                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4590                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4591
4592                         /* Select expansion interrupt status register */
4593                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4594                                          MII_TG3_DSP_EXP1_INT_STAT);
4595                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4596                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4597
4598                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4599                                 /* We have signal detect and not receiving
4600                                  * config code words, link is up by parallel
4601                                  * detection.
4602                                  */
4603
4604                                 bmcr &= ~BMCR_ANENABLE;
4605                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4606                                 tg3_writephy(tp, MII_BMCR, bmcr);
4607                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4608                         }
4609                 }
4610         } else if (netif_carrier_ok(tp->dev) &&
4611                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4612                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4613                 u32 phy2;
4614
4615                 /* Select expansion interrupt status register */
4616                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4617                                  MII_TG3_DSP_EXP1_INT_STAT);
4618                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4619                 if (phy2 & 0x20) {
4620                         u32 bmcr;
4621
4622                         /* Config code words received, turn on autoneg. */
4623                         tg3_readphy(tp, MII_BMCR, &bmcr);
4624                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4625
4626                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4627
4628                 }
4629         }
4630 }
4631
4632 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4633 {
4634         u32 val;
4635         int err;
4636
4637         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4638                 err = tg3_setup_fiber_phy(tp, force_reset);
4639         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4640                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4641         else
4642                 err = tg3_setup_copper_phy(tp, force_reset);
4643
4644         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4645                 u32 scale;
4646
4647                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4648                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4649                         scale = 65;
4650                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4651                         scale = 6;
4652                 else
4653                         scale = 12;
4654
4655                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4656                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4657                 tw32(GRC_MISC_CFG, val);
4658         }
4659
4660         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4661               (6 << TX_LENGTHS_IPG_SHIFT);
4662         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4663                 val |= tr32(MAC_TX_LENGTHS) &
4664                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4665                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4666
4667         if (tp->link_config.active_speed == SPEED_1000 &&
4668             tp->link_config.active_duplex == DUPLEX_HALF)
4669                 tw32(MAC_TX_LENGTHS, val |
4670                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4671         else
4672                 tw32(MAC_TX_LENGTHS, val |
4673                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4674
4675         if (!tg3_flag(tp, 5705_PLUS)) {
4676                 if (netif_carrier_ok(tp->dev)) {
4677                         tw32(HOSTCC_STAT_COAL_TICKS,
4678                              tp->coal.stats_block_coalesce_usecs);
4679                 } else {
4680                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4681                 }
4682         }
4683
4684         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4685                 val = tr32(PCIE_PWR_MGMT_THRESH);
4686                 if (!netif_carrier_ok(tp->dev))
4687                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4688                               tp->pwrmgmt_thresh;
4689                 else
4690                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4691                 tw32(PCIE_PWR_MGMT_THRESH, val);
4692         }
4693
4694         return err;
4695 }
4696
4697 static inline int tg3_irq_sync(struct tg3 *tp)
4698 {
4699         return tp->irq_sync;
4700 }
4701
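/* Copy a block of chip registers into the dump buffer.  The buffer
 * pointer is first advanced by the register offset, so each value
 * lands at the buffer offset matching its register address and the
 * hex dump in tg3_dump_state() lines up with the chip's register map.
 */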
4702 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4703 {
4704         int i;
4705
4706         dst = (u32 *)((u8 *)dst + off);
4707         for (i = 0; i < len; i += sizeof(u32))
4708                 *dst++ = tr32(off + i);
4709 }
4710
4711 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4712 {
4713         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4714         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4715         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4716         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4717         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4718         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4719         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4720         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4721         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4722         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4723         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4724         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4725         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4726         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4727         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4728         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4729         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4730         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4731         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4732
4733         if (tg3_flag(tp, SUPPORT_MSIX))
4734                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4735
4736         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4737         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4738         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4739         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4740         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4741         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4742         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4743         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4744
4745         if (!tg3_flag(tp, 5705_PLUS)) {
4746                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4747                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4748                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4749         }
4750
4751         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4752         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4753         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4754         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4755         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4756
4757         if (tg3_flag(tp, NVRAM))
4758                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4759 }
4760
4761 static void tg3_dump_state(struct tg3 *tp)
4762 {
4763         int i;
4764         u32 *regs;
4765
4766         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4767         if (!regs) {
4768                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4769                 return;
4770         }
4771
4772         if (tg3_flag(tp, PCI_EXPRESS)) {
4773                 /* Read up to but not including private PCI registers */
4774                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4775                         regs[i / sizeof(u32)] = tr32(i);
4776         } else
4777                 tg3_dump_legacy_regs(tp, regs);
4778
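        /* Dump four registers per line, skipping rows that read all
         * zero to keep the output short.
         */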
4779         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4780                 if (!regs[i + 0] && !regs[i + 1] &&
4781                     !regs[i + 2] && !regs[i + 3])
4782                         continue;
4783
4784                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4785                            i * 4,
4786                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4787         }
4788
4789         kfree(regs);
4790
4791         for (i = 0; i < tp->irq_cnt; i++) {
4792                 struct tg3_napi *tnapi = &tp->napi[i];
4793
4794                 /* SW status block */
4795                 netdev_err(tp->dev,
4796                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4797                            i,
4798                            tnapi->hw_status->status,
4799                            tnapi->hw_status->status_tag,
4800                            tnapi->hw_status->rx_jumbo_consumer,
4801                            tnapi->hw_status->rx_consumer,
4802                            tnapi->hw_status->rx_mini_consumer,
4803                            tnapi->hw_status->idx[0].rx_producer,
4804                            tnapi->hw_status->idx[0].tx_consumer);
4805
4806                 netdev_err(tp->dev,
4807                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4808                            i,
4809                            tnapi->last_tag, tnapi->last_irq_tag,
4810                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4811                            tnapi->rx_rcb_ptr,
4812                            tnapi->prodring.rx_std_prod_idx,
4813                            tnapi->prodring.rx_std_cons_idx,
4814                            tnapi->prodring.rx_jmb_prod_idx,
4815                            tnapi->prodring.rx_jmb_cons_idx);
4816         }
4817 }
4818
4819 /* This is called whenever we suspect that the system chipset is re-
4820  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4821  * is bogus tx completions. We try to recover by setting the
4822  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4823  * in the workqueue.
4824  */
4825 static void tg3_tx_recover(struct tg3 *tp)
4826 {
4827         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4828                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4829
4830         netdev_warn(tp->dev,
4831                     "The system may be re-ordering memory-mapped I/O "
4832                     "cycles to the network device, attempting to recover. "
4833                     "Please report the problem to the driver maintainer "
4834                     "and include system chipset information.\n");
4835
4836         spin_lock(&tp->lock);
4837         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4838         spin_unlock(&tp->lock);
4839 }
4840
4841 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4842 {
4843         /* Tell compiler to fetch tx indices from memory. */
4844         barrier();
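        /* E.g. (sketch, assuming TG3_TX_RING_SIZE == 512 as in tg3.h):
         * tx_prod = 10, tx_cons = 500, tx_pending = 511 gives
         * (10 - 500) & 511 = 22 descriptors still in flight, so 489
         * slots remain available.
         */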
4845         return tnapi->tx_pending -
4846                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4847 }
4848
4849 /* Tigon3 never reports partial packet sends.  So we do not
4850  * need special logic to handle SKBs that have not had all
4851  * of their frags sent yet, like SunGEM does.
4852  */
4853 static void tg3_tx(struct tg3_napi *tnapi)
4854 {
4855         struct tg3 *tp = tnapi->tp;
4856         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4857         u32 sw_idx = tnapi->tx_cons;
4858         struct netdev_queue *txq;
4859         int index = tnapi - tp->napi;
4860
4861         if (tg3_flag(tp, ENABLE_TSS))
4862                 index--;
4863
4864         txq = netdev_get_tx_queue(tp->dev, index);
4865
4866         while (sw_idx != hw_idx) {
4867                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4868                 struct sk_buff *skb = ri->skb;
4869                 int i, tx_bug = 0;
4870
4871                 if (unlikely(skb == NULL)) {
4872                         tg3_tx_recover(tp);
4873                         return;
4874                 }
4875
4876                 pci_unmap_single(tp->pdev,
4877                                  dma_unmap_addr(ri, mapping),
4878                                  skb_headlen(skb),
4879                                  PCI_DMA_TODEVICE);
4880
4881                 ri->skb = NULL;
4882
4883                 while (ri->fragmented) {
4884                         ri->fragmented = false;
4885                         sw_idx = NEXT_TX(sw_idx);
4886                         ri = &tnapi->tx_buffers[sw_idx];
4887                 }
4888
4889                 sw_idx = NEXT_TX(sw_idx);
4890
4891                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4892                         ri = &tnapi->tx_buffers[sw_idx];
4893                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4894                                 tx_bug = 1;
4895
4896                         pci_unmap_page(tp->pdev,
4897                                        dma_unmap_addr(ri, mapping),
4898                                        skb_shinfo(skb)->frags[i].size,
4899                                        PCI_DMA_TODEVICE);
4900
4901                         while (ri->fragmented) {
4902                                 ri->fragmented = false;
4903                                 sw_idx = NEXT_TX(sw_idx);
4904                                 ri = &tnapi->tx_buffers[sw_idx];
4905                         }
4906
4907                         sw_idx = NEXT_TX(sw_idx);
4908                 }
4909
4910                 dev_kfree_skb(skb);
4911
4912                 if (unlikely(tx_bug)) {
4913                         tg3_tx_recover(tp);
4914                         return;
4915                 }
4916         }
4917
4918         tnapi->tx_cons = sw_idx;
4919
4920         /* Need to make the tx_cons update visible to tg3_start_xmit()
4921          * before checking for netif_queue_stopped().  Without the
4922          * memory barrier, there is a small possibility that tg3_start_xmit()
4923          * will miss it and cause the queue to be stopped forever.
4924          */
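        /* Sketch of the race: tg3_start_xmit() can observe a full ring,
         * stop the queue, then re-check tg3_tx_avail().  Without this
         * barrier the re-check might miss the tx_cons update above and
         * leave the queue stopped with no completion left to wake it.
         */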
4925         smp_mb();
4926
4927         if (unlikely(netif_tx_queue_stopped(txq) &&
4928                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4929                 __netif_tx_lock(txq, smp_processor_id());
4930                 if (netif_tx_queue_stopped(txq) &&
4931                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4932                         netif_tx_wake_queue(txq);
4933                 __netif_tx_unlock(txq);
4934         }
4935 }
4936
4937 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4938 {
4939         if (!ri->skb)
4940                 return;
4941
4942         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4943                          map_sz, PCI_DMA_FROMDEVICE);
4944         dev_kfree_skb_any(ri->skb);
4945         ri->skb = NULL;
4946 }
4947
4948 /* Returns size of skb allocated or < 0 on error.
4949  *
4950  * We only need to fill in the address because the other members
4951  * of the RX descriptor are invariant, see tg3_init_rings.
4952  *
4953  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4954  * posting buffers we only dirty the first cache line of the RX
4955  * descriptor (containing the address).  Whereas for the RX status
4956  * buffers the cpu only reads the last cacheline of the RX descriptor
4957  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4958  */
4959 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4960                             u32 opaque_key, u32 dest_idx_unmasked)
4961 {
4962         struct tg3_rx_buffer_desc *desc;
4963         struct ring_info *map;
4964         struct sk_buff *skb;
4965         dma_addr_t mapping;
4966         int skb_size, dest_idx;
4967
4968         switch (opaque_key) {
4969         case RXD_OPAQUE_RING_STD:
4970                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4971                 desc = &tpr->rx_std[dest_idx];
4972                 map = &tpr->rx_std_buffers[dest_idx];
4973                 skb_size = tp->rx_pkt_map_sz;
4974                 break;
4975
4976         case RXD_OPAQUE_RING_JUMBO:
4977                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4978                 desc = &tpr->rx_jmb[dest_idx].std;
4979                 map = &tpr->rx_jmb_buffers[dest_idx];
4980                 skb_size = TG3_RX_JMB_MAP_SZ;
4981                 break;
4982
4983         default:
4984                 return -EINVAL;
4985         }
4986
4987         /* Do not overwrite any of the map or rp information
4988          * until we are sure we can commit to a new buffer.
4989          *
4990          * Callers depend upon this behavior and assume that
4991          * we leave everything unchanged if we fail.
4992          */
4993         skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
4994         if (skb == NULL)
4995                 return -ENOMEM;
4996
4997         skb_reserve(skb, TG3_RX_OFFSET(tp));
4998
4999         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5000                                  PCI_DMA_FROMDEVICE);
5001         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5002                 dev_kfree_skb(skb);
5003                 return -EIO;
5004         }
5005
5006         map->skb = skb;
5007         dma_unmap_addr_set(map, mapping, mapping);
5008
5009         desc->addr_hi = ((u64)mapping >> 32);
5010         desc->addr_lo = ((u64)mapping & 0xffffffff);
5011
5012         return skb_size;
5013 }
5014
5015 /* We only need to move over in the address because the other
5016  * members of the RX descriptor are invariant.  See notes above
5017  * tg3_alloc_rx_skb for full details.
5018  */
5019 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5020                            struct tg3_rx_prodring_set *dpr,
5021                            u32 opaque_key, int src_idx,
5022                            u32 dest_idx_unmasked)
5023 {
5024         struct tg3 *tp = tnapi->tp;
5025         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5026         struct ring_info *src_map, *dest_map;
5027         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5028         int dest_idx;
5029
5030         switch (opaque_key) {
5031         case RXD_OPAQUE_RING_STD:
5032                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5033                 dest_desc = &dpr->rx_std[dest_idx];
5034                 dest_map = &dpr->rx_std_buffers[dest_idx];
5035                 src_desc = &spr->rx_std[src_idx];
5036                 src_map = &spr->rx_std_buffers[src_idx];
5037                 break;
5038
5039         case RXD_OPAQUE_RING_JUMBO:
5040                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5041                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5042                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5043                 src_desc = &spr->rx_jmb[src_idx].std;
5044                 src_map = &spr->rx_jmb_buffers[src_idx];
5045                 break;
5046
5047         default:
5048                 return;
5049         }
5050
5051         dest_map->skb = src_map->skb;
5052         dma_unmap_addr_set(dest_map, mapping,
5053                            dma_unmap_addr(src_map, mapping));
5054         dest_desc->addr_hi = src_desc->addr_hi;
5055         dest_desc->addr_lo = src_desc->addr_lo;
5056
5057         /* Ensure that the update to the skb happens after the physical
5058          * addresses have been transferred to the new BD location.
5059          */
5060         smp_wmb();
5061
5062         src_map->skb = NULL;
5063 }
5064
5065 /* The RX ring scheme is composed of multiple rings which post fresh
5066  * buffers to the chip, and one special ring the chip uses to report
5067  * status back to the host.
5068  *
5069  * The special ring reports the status of received packets to the
5070  * host.  The chip does not write into the original descriptor the
5071  * RX buffer was obtained from.  The chip simply takes the original
5072  * descriptor as provided by the host, updates the status and length
5073  * field, then writes this into the next status ring entry.
5074  *
5075  * Each ring the host uses to post buffers to the chip is described
5076  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5077  * it is first placed into the on-chip ram.  When the packet's length
5078  * is known, it walks down the TG3_BDINFO entries to select the ring.
5079  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5080  * whose MAXLEN covers the new packet's length is chosen.
5081  *
5082  * The "separate ring for rx status" scheme may sound queer, but it makes
5083  * sense from a cache coherency perspective.  If only the host writes
5084  * to the buffer post rings, and only the chip writes to the rx status
5085  * rings, then cache lines never move beyond shared-modified state.
5086  * If both the host and chip were to write into the same ring, cache line
5087  * eviction could occur since both entities want it in an exclusive state.
5088  */
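/* Concrete walk-through (sketch, sizes illustrative): the host posts a
 * 1536-byte buffer in the standard ring and a 9K buffer in the jumbo
 * ring.  A 1200-byte frame fits the standard ring's MAXLEN, so the chip
 * fills that buffer, then writes length, flags, and the original opaque
 * cookie into the next status ring entry; tg3_rx() below uses the
 * cookie to locate the buffer it handed to the chip.
 */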
5089 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5090 {
5091         struct tg3 *tp = tnapi->tp;
5092         u32 work_mask, rx_std_posted = 0;
5093         u32 std_prod_idx, jmb_prod_idx;
5094         u32 sw_idx = tnapi->rx_rcb_ptr;
5095         u16 hw_idx;
5096         int received;
5097         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5098
5099         hw_idx = *(tnapi->rx_rcb_prod_idx);
5100         /*
5101          * We need to order the read of hw_idx and the read of
5102          * the opaque cookie.
5103          */
5104         rmb();
5105         work_mask = 0;
5106         received = 0;
5107         std_prod_idx = tpr->rx_std_prod_idx;
5108         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5109         while (sw_idx != hw_idx && budget > 0) {
5110                 struct ring_info *ri;
5111                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5112                 unsigned int len;
5113                 struct sk_buff *skb;
5114                 dma_addr_t dma_addr;
5115                 u32 opaque_key, desc_idx, *post_ptr;
5116
5117                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5118                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5119                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5120                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5121                         dma_addr = dma_unmap_addr(ri, mapping);
5122                         skb = ri->skb;
5123                         post_ptr = &std_prod_idx;
5124                         rx_std_posted++;
5125                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5126                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5127                         dma_addr = dma_unmap_addr(ri, mapping);
5128                         skb = ri->skb;
5129                         post_ptr = &jmb_prod_idx;
5130                 } else
5131                         goto next_pkt_nopost;
5132
5133                 work_mask |= opaque_key;
5134
5135                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5136                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5137                 drop_it:
5138                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5139                                        desc_idx, *post_ptr);
5140                 drop_it_no_recycle:
5141                         /* Other statistics are tracked by the card. */
5142                         tp->rx_dropped++;
5143                         goto next_pkt;
5144                 }
5145
5146                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5147                       ETH_FCS_LEN;
5148
5149                 if (len > TG3_RX_COPY_THRESH(tp)) {
5150                         int skb_size;
5151
5152                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5153                                                     *post_ptr);
5154                         if (skb_size < 0)
5155                                 goto drop_it;
5156
5157                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5158                                          PCI_DMA_FROMDEVICE);
5159
5160                         /* Ensure that the update to the skb happens
5161                          * after the usage of the old DMA mapping.
5162                          */
5163                         smp_wmb();
5164
5165                         ri->skb = NULL;
5166
5167                         skb_put(skb, len);
5168                 } else {
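                        /* Small frame: copy it into a fresh skb and
                         * recycle the (much larger) ring buffer in
                         * place, trading a short memcpy for a new
                         * buffer allocation and DMA map.
                         */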
5169                         struct sk_buff *copy_skb;
5170
5171                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5172                                        desc_idx, *post_ptr);
5173
5174                         copy_skb = netdev_alloc_skb(tp->dev, len +
5175                                                     TG3_RAW_IP_ALIGN);
5176                         if (copy_skb == NULL)
5177                                 goto drop_it_no_recycle;
5178
5179                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5180                         skb_put(copy_skb, len);
5181                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5182                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5183                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5184
5185                         /* We'll reuse the original ring buffer. */
5186                         skb = copy_skb;
5187                 }
5188
5189                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5190                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5191                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5192                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5193                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5194                 else
5195                         skb_checksum_none_assert(skb);
5196
5197                 skb->protocol = eth_type_trans(skb, tp->dev);
5198
5199                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5200                     skb->protocol != htons(ETH_P_8021Q)) {
5201                         dev_kfree_skb(skb);
5202                         goto drop_it_no_recycle;
5203                 }
5204
5205                 if (desc->type_flags & RXD_FLAG_VLAN &&
5206                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5207                         __vlan_hwaccel_put_tag(skb,
5208                                                desc->err_vlan & RXD_VLAN_MASK);
5209
5210                 napi_gro_receive(&tnapi->napi, skb);
5211
5212                 received++;
5213                 budget--;
5214
5215 next_pkt:
5216                 (*post_ptr)++;
5217
5218                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5219                         tpr->rx_std_prod_idx = std_prod_idx &
5220                                                tp->rx_std_ring_mask;
5221                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5222                                      tpr->rx_std_prod_idx);
5223                         work_mask &= ~RXD_OPAQUE_RING_STD;
5224                         rx_std_posted = 0;
5225                 }
5226 next_pkt_nopost:
5227                 sw_idx++;
5228                 sw_idx &= tp->rx_ret_ring_mask;
5229
5230                 /* Refresh hw_idx to see if there is new work */
5231                 if (sw_idx == hw_idx) {
5232                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5233                         rmb();
5234                 }
5235         }
5236
5237         /* ACK the status ring. */
5238         tnapi->rx_rcb_ptr = sw_idx;
5239         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5240
5241         /* Refill RX ring(s). */
5242         if (!tg3_flag(tp, ENABLE_RSS)) {
5243                 if (work_mask & RXD_OPAQUE_RING_STD) {
5244                         tpr->rx_std_prod_idx = std_prod_idx &
5245                                                tp->rx_std_ring_mask;
5246                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5247                                      tpr->rx_std_prod_idx);
5248                 }
5249                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5250                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5251                                                tp->rx_jmb_ring_mask;
5252                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5253                                      tpr->rx_jmb_prod_idx);
5254                 }
5255                 mmiowb();
5256         } else if (work_mask) {
5257                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5258                  * updated before the producer indices can be updated.
5259                  */
5260                 smp_wmb();
5261
5262                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5263                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5264
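                /* napi[1] owns publishing refills to the hardware
                 * mailboxes (see the RSS branch in tg3_poll_work()),
                 * so kick it from any other vector.
                 */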
5265                 if (tnapi != &tp->napi[1])
5266                         napi_schedule(&tp->napi[1].napi);
5267         }
5268
5269         return received;
5270 }
5271
5272 static void tg3_poll_link(struct tg3 *tp)
5273 {
5274         /* handle link change and other phy events */
5275         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5276                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5277
5278                 if (sblk->status & SD_STATUS_LINK_CHG) {
5279                         sblk->status = SD_STATUS_UPDATED |
5280                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5281                         spin_lock(&tp->lock);
5282                         if (tg3_flag(tp, USE_PHYLIB)) {
5283                                 tw32_f(MAC_STATUS,
5284                                      (MAC_STATUS_SYNC_CHANGED |
5285                                       MAC_STATUS_CFG_CHANGED |
5286                                       MAC_STATUS_MI_COMPLETION |
5287                                       MAC_STATUS_LNKSTATE_CHANGED));
5288                                 udelay(40);
5289                         } else
5290                                 tg3_setup_phy(tp, 0);
5291                         spin_unlock(&tp->lock);
5292                 }
5293         }
5294 }
5295
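/* RSS helper (sketch of intent): splice buffers that a per-vector
 * producer ring (spr) has refilled into the ring the hardware actually
 * consumes (dpr), for both the standard and jumbo rings, honoring ring
 * wrap.  Returns -ENOSPC if destination slots are still occupied.
 */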
5296 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5297                                 struct tg3_rx_prodring_set *dpr,
5298                                 struct tg3_rx_prodring_set *spr)
5299 {
5300         u32 si, di, cpycnt, src_prod_idx;
5301         int i, err = 0;
5302
5303         while (1) {
5304                 src_prod_idx = spr->rx_std_prod_idx;
5305
5306                 /* Make sure updates to the rx_std_buffers[] entries and the
5307                  * standard producer index are seen in the correct order.
5308                  */
5309                 smp_rmb();
5310
5311                 if (spr->rx_std_cons_idx == src_prod_idx)
5312                         break;
5313
5314                 if (spr->rx_std_cons_idx < src_prod_idx)
5315                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5316                 else
5317                         cpycnt = tp->rx_std_ring_mask + 1 -
5318                                  spr->rx_std_cons_idx;
5319
5320                 cpycnt = min(cpycnt,
5321                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5322
5323                 si = spr->rx_std_cons_idx;
5324                 di = dpr->rx_std_prod_idx;
5325
5326                 for (i = di; i < di + cpycnt; i++) {
5327                         if (dpr->rx_std_buffers[i].skb) {
5328                                 cpycnt = i - di;
5329                                 err = -ENOSPC;
5330                                 break;
5331                         }
5332                 }
5333
5334                 if (!cpycnt)
5335                         break;
5336
5337                 /* Ensure that updates to the rx_std_buffers ring and the
5338                  * shadowed hardware producer ring from tg3_recycle_skb() are
5339                  * ordered correctly WRT the skb check above.
5340                  */
5341                 smp_rmb();
5342
5343                 memcpy(&dpr->rx_std_buffers[di],
5344                        &spr->rx_std_buffers[si],
5345                        cpycnt * sizeof(struct ring_info));
5346
5347                 for (i = 0; i < cpycnt; i++, di++, si++) {
5348                         struct tg3_rx_buffer_desc *sbd, *dbd;
5349                         sbd = &spr->rx_std[si];
5350                         dbd = &dpr->rx_std[di];
5351                         dbd->addr_hi = sbd->addr_hi;
5352                         dbd->addr_lo = sbd->addr_lo;
5353                 }
5354
5355                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5356                                        tp->rx_std_ring_mask;
5357                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5358                                        tp->rx_std_ring_mask;
5359         }
5360
5361         while (1) {
5362                 src_prod_idx = spr->rx_jmb_prod_idx;
5363
5364                 /* Make sure updates to the rx_jmb_buffers[] entries and
5365                  * the jumbo producer index are seen in the correct order.
5366                  */
5367                 smp_rmb();
5368
5369                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5370                         break;
5371
5372                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5373                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5374                 else
5375                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5376                                  spr->rx_jmb_cons_idx;
5377
5378                 cpycnt = min(cpycnt,
5379                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5380
5381                 si = spr->rx_jmb_cons_idx;
5382                 di = dpr->rx_jmb_prod_idx;
5383
5384                 for (i = di; i < di + cpycnt; i++) {
5385                         if (dpr->rx_jmb_buffers[i].skb) {
5386                                 cpycnt = i - di;
5387                                 err = -ENOSPC;
5388                                 break;
5389                         }
5390                 }
5391
5392                 if (!cpycnt)
5393                         break;
5394
5395                 /* Ensure that updates to the rx_jmb_buffers ring and the
5396                  * shadowed hardware producer ring from tg3_recycle_skb() are
5397                  * ordered correctly WRT the skb check above.
5398                  */
5399                 smp_rmb();
5400
5401                 memcpy(&dpr->rx_jmb_buffers[di],
5402                        &spr->rx_jmb_buffers[si],
5403                        cpycnt * sizeof(struct ring_info));
5404
5405                 for (i = 0; i < cpycnt; i++, di++, si++) {
5406                         struct tg3_rx_buffer_desc *sbd, *dbd;
5407                         sbd = &spr->rx_jmb[si].std;
5408                         dbd = &dpr->rx_jmb[di].std;
5409                         dbd->addr_hi = sbd->addr_hi;
5410                         dbd->addr_lo = sbd->addr_lo;
5411                 }
5412
5413                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5414                                        tp->rx_jmb_ring_mask;
5415                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5416                                        tp->rx_jmb_ring_mask;
5417         }
5418
5419         return err;
5420 }
5421
5422 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5423 {
5424         struct tg3 *tp = tnapi->tp;
5425
5426         /* run TX completion thread */
5427         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5428                 tg3_tx(tnapi);
5429                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5430                         return work_done;
5431         }
5432
5433         /* run RX thread, within the bounds set by NAPI.
5434          * All RX "locking" is done by ensuring outside
5435          * code synchronizes with tg3->napi.poll()
5436          */
5437         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5438                 work_done += tg3_rx(tnapi, budget - work_done);
5439
5440         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5441                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5442                 int i, err = 0;
5443                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5444                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5445
5446                 for (i = 1; i < tp->irq_cnt; i++)
5447                         err |= tg3_rx_prodring_xfer(tp, dpr,
5448                                                     &tp->napi[i].prodring);
5449
5450                 wmb();
5451
5452                 if (std_prod_idx != dpr->rx_std_prod_idx)
5453                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5454                                      dpr->rx_std_prod_idx);
5455
5456                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5457                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5458                                      dpr->rx_jmb_prod_idx);
5459
5460                 mmiowb();
5461
5462                 if (err)
5463                         tw32_f(HOSTCC_MODE, tp->coal_now);
5464         }
5465
5466         return work_done;
5467 }
5468
5469 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5470 {
5471         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5472         struct tg3 *tp = tnapi->tp;
5473         int work_done = 0;
5474         struct tg3_hw_status *sblk = tnapi->hw_status;
5475
5476         while (1) {
5477                 work_done = tg3_poll_work(tnapi, work_done, budget);
5478
5479                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5480                         goto tx_recovery;
5481
5482                 if (unlikely(work_done >= budget))
5483                         break;
5484
5485                 /* tp->last_tag is used when re-enabling interrupts below
5486                  * to tell the hw how much work has been processed,
5487                  * so we must read it before checking for more work.
5488                  */
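                /* Tagged-status contract (sketch): the hw bumps
                 * status_tag on every status block update; echoing the
                 * tag through the mailbox write below re-arms the IRQ,
                 * and the hw interrupts again only if it has produced
                 * newer work than the tag we acknowledged.
                 */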
5489                 tnapi->last_tag = sblk->status_tag;
5490                 tnapi->last_irq_tag = tnapi->last_tag;
5491                 rmb();
5492
5493                 /* check for RX/TX work to do */
5494                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5495                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5496                         napi_complete(napi);
5497                         /* Reenable interrupts. */
5498                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5499                         mmiowb();
5500                         break;
5501                 }
5502         }
5503
5504         return work_done;
5505
5506 tx_recovery:
5507         /* work_done is guaranteed to be less than budget. */
5508         napi_complete(napi);
5509         schedule_work(&tp->reset_task);
5510         return work_done;
5511 }
5512
5513 static void tg3_process_error(struct tg3 *tp)
5514 {
5515         u32 val;
5516         bool real_error = false;
5517
5518         if (tg3_flag(tp, ERROR_PROCESSED))
5519                 return;
5520
5521         /* Check Flow Attention register */
5522         val = tr32(HOSTCC_FLOW_ATTN);
5523         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5524                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5525                 real_error = true;
5526         }
5527
5528         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5529                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5530                 real_error = true;
5531         }
5532
5533         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5534                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5535                 real_error = true;
5536         }
5537
5538         if (!real_error)
5539                 return;
5540
5541         tg3_dump_state(tp);
5542
5543         tg3_flag_set(tp, ERROR_PROCESSED);
5544         schedule_work(&tp->reset_task);
5545 }
5546
5547 static int tg3_poll(struct napi_struct *napi, int budget)
5548 {
5549         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5550         struct tg3 *tp = tnapi->tp;
5551         int work_done = 0;
5552         struct tg3_hw_status *sblk = tnapi->hw_status;
5553
5554         while (1) {
5555                 if (sblk->status & SD_STATUS_ERROR)
5556                         tg3_process_error(tp);
5557
5558                 tg3_poll_link(tp);
5559
5560                 work_done = tg3_poll_work(tnapi, work_done, budget);
5561
5562                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5563                         goto tx_recovery;
5564
5565                 if (unlikely(work_done >= budget))
5566                         break;
5567
5568                 if (tg3_flag(tp, TAGGED_STATUS)) {
5569                         /* tnapi->last_tag is used in tg3_int_reenable() below
5570                          * to tell the hw how much work has been processed,
5571                          * so we must read it before checking for more work.
5572                          */
5573                         tnapi->last_tag = sblk->status_tag;
5574                         tnapi->last_irq_tag = tnapi->last_tag;
5575                         rmb();
5576                 } else
5577                         sblk->status &= ~SD_STATUS_UPDATED;
5578
5579                 if (likely(!tg3_has_work(tnapi))) {
5580                         napi_complete(napi);
5581                         tg3_int_reenable(tnapi);
5582                         break;
5583                 }
5584         }
5585
5586         return work_done;
5587
5588 tx_recovery:
5589         /* work_done is guaranteed to be less than budget. */
5590         napi_complete(napi);
5591         schedule_work(&tp->reset_task);
5592         return work_done;
5593 }
5594
5595 static void tg3_napi_disable(struct tg3 *tp)
5596 {
5597         int i;
5598
5599         for (i = tp->irq_cnt - 1; i >= 0; i--)
5600                 napi_disable(&tp->napi[i].napi);
5601 }
5602
5603 static void tg3_napi_enable(struct tg3 *tp)
5604 {
5605         int i;
5606
5607         for (i = 0; i < tp->irq_cnt; i++)
5608                 napi_enable(&tp->napi[i].napi);
5609 }
5610
5611 static void tg3_napi_init(struct tg3 *tp)
5612 {
5613         int i;
5614
5615         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5616         for (i = 1; i < tp->irq_cnt; i++)
5617                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5618 }
5619
5620 static void tg3_napi_fini(struct tg3 *tp)
5621 {
5622         int i;
5623
5624         for (i = 0; i < tp->irq_cnt; i++)
5625                 netif_napi_del(&tp->napi[i].napi);
5626 }
5627
5628 static inline void tg3_netif_stop(struct tg3 *tp)
5629 {
5630         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5631         tg3_napi_disable(tp);
5632         netif_tx_disable(tp->dev);
5633 }
5634
5635 static inline void tg3_netif_start(struct tg3 *tp)
5636 {
5637         /* NOTE: unconditional netif_tx_wake_all_queues is only
5638          * appropriate so long as all callers are assured to
5639          * have free tx slots (such as after tg3_init_hw)
5640          */
5641         netif_tx_wake_all_queues(tp->dev);
5642
5643         tg3_napi_enable(tp);
5644         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5645         tg3_enable_ints(tp);
5646 }
5647
5648 static void tg3_irq_quiesce(struct tg3 *tp)
5649 {
5650         int i;
5651
5652         BUG_ON(tp->irq_sync);
5653
5654         tp->irq_sync = 1;
5655         smp_mb();
5656
5657         for (i = 0; i < tp->irq_cnt; i++)
5658                 synchronize_irq(tp->napi[i].irq_vec);
5659 }
5660
5661 /* Fully shut down all tg3 driver activity elsewhere in the system.
5662  * If irq_sync is non-zero, then the IRQ handler must be synchronized
5663  * as well.  Most of the time, this is not necessary except when
5664  * shutting down the device.
5665  */
5666 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5667 {
5668         spin_lock_bh(&tp->lock);
5669         if (irq_sync)
5670                 tg3_irq_quiesce(tp);
5671 }
5672
5673 static inline void tg3_full_unlock(struct tg3 *tp)
5674 {
5675         spin_unlock_bh(&tp->lock);
5676 }
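
/* Typical use of the two locking helpers above, as seen in paths such
 * as tg3_change_mtu() later in this file (illustrative sketch only):
 *
 *	tg3_full_lock(tp, 1);		// irq_sync=1 also quiesces all vectors
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	err = tg3_restart_hw(tp, 0);
 *	tg3_full_unlock(tp);
 */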
5677
5678 /* One-shot MSI handler - Chip automatically disables interrupt
5679  * after sending MSI so driver doesn't have to do it.
5680  */
5681 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5682 {
5683         struct tg3_napi *tnapi = dev_id;
5684         struct tg3 *tp = tnapi->tp;
5685
5686         prefetch(tnapi->hw_status);
5687         if (tnapi->rx_rcb)
5688                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5689
5690         if (likely(!tg3_irq_sync(tp)))
5691                 napi_schedule(&tnapi->napi);
5692
5693         return IRQ_HANDLED;
5694 }
5695
5696 /* MSI ISR - No need to check for interrupt sharing and no need to
5697  * flush status block and interrupt mailbox. PCI ordering rules
5698  * guarantee that MSI will arrive after the status block.
5699  */
5700 static irqreturn_t tg3_msi(int irq, void *dev_id)
5701 {
5702         struct tg3_napi *tnapi = dev_id;
5703         struct tg3 *tp = tnapi->tp;
5704
5705         prefetch(tnapi->hw_status);
5706         if (tnapi->rx_rcb)
5707                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5708         /*
5709          * Writing any value to intr-mbox-0 clears PCI INTA# and
5710          * chip-internal interrupt pending events.
5711          * Writing non-zero to intr-mbox-0 additionally tells the
5712          * NIC to stop sending us irqs, engaging "in-intr-handler"
5713          * event coalescing.
5714          */
5715         tw32_mailbox(tnapi->int_mbox, 0x00000001);
5716         if (likely(!tg3_irq_sync(tp)))
5717                 napi_schedule(&tnapi->napi);
5718
5719         return IRQ_RETVAL(1);
5720 }
5721
5722 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5723 {
5724         struct tg3_napi *tnapi = dev_id;
5725         struct tg3 *tp = tnapi->tp;
5726         struct tg3_hw_status *sblk = tnapi->hw_status;
5727         unsigned int handled = 1;
5728
5729         /* In INTx mode, it is possible for the interrupt to arrive at
5730          * the CPU before the status block posted prior to the interrupt
5731          * is visible.  Reading the PCI State register will confirm
5732          * whether the interrupt is ours and will flush the status block.
5733          */
5734         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5735                 if (tg3_flag(tp, CHIP_RESETTING) ||
5736                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5737                         handled = 0;
5738                         goto out;
5739                 }
5740         }
5741
5742         /*
5743          * Writing any value to intr-mbox-0 clears PCI INTA# and
5744          * chip-internal interrupt pending events.
5745          * Writing non-zero to intr-mbox-0 additionally tells the
5746          * NIC to stop sending us irqs, engaging "in-intr-handler"
5747          * event coalescing.
5748          *
5749          * Flush the mailbox to de-assert the IRQ immediately to prevent
5750          * spurious interrupts.  The flush impacts performance but
5751          * excessive spurious interrupts can be worse in some cases.
5752          */
5753         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5754         if (tg3_irq_sync(tp))
5755                 goto out;
5756         sblk->status &= ~SD_STATUS_UPDATED;
5757         if (likely(tg3_has_work(tnapi))) {
5758                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5759                 napi_schedule(&tnapi->napi);
5760         } else {
5761                 /* No work, shared interrupt perhaps?  re-enable
5762                  * interrupts, and flush that PCI write
5763                  */
5764                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5765                                0x00000000);
5766         }
5767 out:
5768         return IRQ_RETVAL(handled);
5769 }
5770
5771 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5772 {
5773         struct tg3_napi *tnapi = dev_id;
5774         struct tg3 *tp = tnapi->tp;
5775         struct tg3_hw_status *sblk = tnapi->hw_status;
5776         unsigned int handled = 1;
5777
5778         /* In INTx mode, it is possible for the interrupt to arrive at
5779          * the CPU before the status block posted prior to the interrupt
5780          * is visible.  Reading the PCI State register will confirm
5781          * whether the interrupt is ours and will flush the status block.
5782          */
5783         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5784                 if (tg3_flag(tp, CHIP_RESETTING) ||
5785                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5786                         handled = 0;
5787                         goto out;
5788                 }
5789         }
5790
5791         /*
5792          * Writing any value to intr-mbox-0 clears PCI INTA# and
5793          * chip-internal interrupt pending events.
5794          * Writing non-zero to intr-mbox-0 additionally tells the
5795          * NIC to stop sending us irqs, engaging "in-intr-handler"
5796          * event coalescing.
5797          *
5798          * Flush the mailbox to de-assert the IRQ immediately to prevent
5799          * spurious interrupts.  The flush impacts performance but
5800          * excessive spurious interrupts can be worse in some cases.
5801          */
5802         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5803
5804         /*
5805          * In a shared interrupt configuration, sometimes other devices'
5806          * interrupts will scream.  We record the current status tag here
5807          * so that the above check can report that the screaming interrupts
5808          * are unhandled.  Eventually they will be silenced.
5809          */
5810         tnapi->last_irq_tag = sblk->status_tag;
5811
5812         if (tg3_irq_sync(tp))
5813                 goto out;
5814
5815         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5816
5817         napi_schedule(&tnapi->napi);
5818
5819 out:
5820         return IRQ_RETVAL(handled);
5821 }
5822
5823 /* ISR for interrupt test */
5824 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5825 {
5826         struct tg3_napi *tnapi = dev_id;
5827         struct tg3 *tp = tnapi->tp;
5828         struct tg3_hw_status *sblk = tnapi->hw_status;
5829
5830         if ((sblk->status & SD_STATUS_UPDATED) ||
5831             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5832                 tg3_disable_ints(tp);
5833                 return IRQ_RETVAL(1);
5834         }
5835         return IRQ_RETVAL(0);
5836 }
5837
5838 static int tg3_init_hw(struct tg3 *, int);
5839 static int tg3_halt(struct tg3 *, int, int);
5840
5841 /* Restart hardware after configuration changes, self-test, etc.
5842  * Invoked with tp->lock held.
5843  */
5844 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5845         __releases(tp->lock)
5846         __acquires(tp->lock)
5847 {
5848         int err;
5849
5850         err = tg3_init_hw(tp, reset_phy);
5851         if (err) {
5852                 netdev_err(tp->dev,
5853                            "Failed to re-initialize device, aborting\n");
5854                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5855                 tg3_full_unlock(tp);
5856                 del_timer_sync(&tp->timer);
5857                 tp->irq_sync = 0;
5858                 tg3_napi_enable(tp);
5859                 dev_close(tp->dev);
5860                 tg3_full_lock(tp, 0);
5861         }
5862         return err;
5863 }
5864
5865 #ifdef CONFIG_NET_POLL_CONTROLLER
5866 static void tg3_poll_controller(struct net_device *dev)
5867 {
5868         int i;
5869         struct tg3 *tp = netdev_priv(dev);
5870
5871         for (i = 0; i < tp->irq_cnt; i++)
5872                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5873 }
5874 #endif
5875
5876 static void tg3_reset_task(struct work_struct *work)
5877 {
5878         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5879         int err;
5880         unsigned int restart_timer;
5881
5882         tg3_full_lock(tp, 0);
5883
5884         if (!netif_running(tp->dev)) {
5885                 tg3_full_unlock(tp);
5886                 return;
5887         }
5888
5889         tg3_full_unlock(tp);
5890
5891         tg3_phy_stop(tp);
5892
5893         tg3_netif_stop(tp);
5894
5895         tg3_full_lock(tp, 1);
5896
5897         restart_timer = tg3_flag(tp, RESTART_TIMER);
5898         tg3_flag_clear(tp, RESTART_TIMER);
5899
5900         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5901                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5902                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5903                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5904                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5905         }
5906
5907         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5908         err = tg3_init_hw(tp, 1);
5909         if (err)
5910                 goto out;
5911
5912         tg3_netif_start(tp);
5913
5914         if (restart_timer)
5915                 mod_timer(&tp->timer, jiffies + 1);
5916
5917 out:
5918         tg3_full_unlock(tp);
5919
5920         if (!err)
5921                 tg3_phy_start(tp);
5922 }
5923
5924 static void tg3_tx_timeout(struct net_device *dev)
5925 {
5926         struct tg3 *tp = netdev_priv(dev);
5927
5928         if (netif_msg_tx_err(tp)) {
5929                 netdev_err(dev, "transmit timed out, resetting\n");
5930                 tg3_dump_state(tp);
5931         }
5932
5933         schedule_work(&tp->reset_task);
5934 }
5935
5936 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5937 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5938 {
5939         u32 base = (u32) mapping & 0xffffffff;
5940
5941         return (base > 0xffffdcc0) && (base + len + 8 < base);
5942 }
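
/* Worked example for the test above (illustrative only): a mapping with
 * base = 0xfffffa00 and len = 1600 gives base + len + 8 = 0x100000048,
 * which truncates to 0x48 in 32 bits, i.e. less than base, so the
 * buffer straddles the 4GB boundary and must be worked around.  A
 * mapping at 0x7ffffa00 fails the base > 0xffffdcc0 screen and passes.
 */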
5943
5944 /* Test for DMA addresses > 40-bit */
5945 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5946                                           int len)
5947 {
5948 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5949         if (tg3_flag(tp, 40BIT_DMA_BUG))
5950                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5951         return 0;
5952 #else
5953         return 0;
5954 #endif
5955 }
5956
5957 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
5958                                  dma_addr_t mapping, u32 len, u32 flags,
5959                                  u32 mss, u32 vlan)
5960 {
5961         txbd->addr_hi = ((u64) mapping >> 32);
5962         txbd->addr_lo = ((u64) mapping & 0xffffffff);
5963         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5964         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
5965 }
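
/* Illustrative encoding produced by the helper above (example values,
 * not driver code): mapping = 0x123456000, len = 1514, flags =
 * TXD_FLAG_END, mss = 0, vlan = 5 yields
 *
 *	txbd->addr_hi   = 0x00000001;
 *	txbd->addr_lo   = 0x23456000;
 *	txbd->len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END;
 *	txbd->vlan_tag  = 5 << TXD_VLAN_TAG_SHIFT;
 */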
5966
5967 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
5968                             dma_addr_t map, u32 len, u32 flags,
5969                             u32 mss, u32 vlan)
5970 {
5971         struct tg3 *tp = tnapi->tp;
5972         bool hwbug = false;
5973
5974         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
5975                 hwbug = true;
5976
5977         if (tg3_4g_overflow_test(map, len))
5978                 hwbug = true;
5979
5980         if (tg3_40bit_overflow_test(tp, map, len))
5981                 hwbug = true;
5982
5983         if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
5984                 u32 tmp_flag = flags & ~TXD_FLAG_END;
5985                 while (len > TG3_TX_BD_DMA_MAX) {
5986                         u32 frag_len = TG3_TX_BD_DMA_MAX;
5987                         len -= TG3_TX_BD_DMA_MAX;
5988
5989                         if (len) {
5990                                 tnapi->tx_buffers[*entry].fragmented = true;
5991                                 /* Avoid the 8-byte DMA problem */
5992                                 if (len <= 8) {
5993                                         len += TG3_TX_BD_DMA_MAX / 2;
5994                                         frag_len = TG3_TX_BD_DMA_MAX / 2;
5995                                 }
5996                         } else
5997                                 tmp_flag = flags;
5998
5999                         if (*budget) {
6000                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6001                                               frag_len, tmp_flag, mss, vlan);
6002                                 (*budget)--;
6003                                 *entry = NEXT_TX(*entry);
6004                         } else {
6005                                 hwbug = true;
6006                                 break;
6007                         }
6008
6009                         map += frag_len;
6010                 }
6011
6012                 if (len) {
6013                         if (*budget) {
6014                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6015                                               len, flags, mss, vlan);
6016                                 (*budget)--;
6017                                 *entry = NEXT_TX(*entry);
6018                         } else {
6019                                 hwbug = true;
6020                         }
6021                 }
6022         } else {
6023                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6024                               len, flags, mss, vlan);
6025                 *entry = NEXT_TX(*entry);
6026         }
6027
6028         return hwbug;
6029 }
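
/* Worked example of the 4K_FIFO_LIMIT path above (illustrative only,
 * assuming TG3_TX_BD_DMA_MAX is 4096): an 8200-byte fragment would
 * naively split as 4096 + 4096 + 8, but the 8-byte tail would trip the
 * short-DMA bug, so the second chunk is halved and the split becomes
 * 4096 + 2048 + 2056, consuming three descriptors from *budget.
 */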
6030
6031 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6032 {
6033         int i;
6034         struct sk_buff *skb;
6035         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6036
6037         skb = txb->skb;
6038         txb->skb = NULL;
6039
6040         pci_unmap_single(tnapi->tp->pdev,
6041                          dma_unmap_addr(txb, mapping),
6042                          skb_headlen(skb),
6043                          PCI_DMA_TODEVICE);
6044
6045         while (txb->fragmented) {
6046                 txb->fragmented = false;
6047                 entry = NEXT_TX(entry);
6048                 txb = &tnapi->tx_buffers[entry];
6049         }
6050
6051         for (i = 0; i < last; i++) {
6052                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6053
6054                 entry = NEXT_TX(entry);
6055                 txb = &tnapi->tx_buffers[entry];
6056
6057                 pci_unmap_page(tnapi->tp->pdev,
6058                                dma_unmap_addr(txb, mapping),
6059                                frag->size, PCI_DMA_TODEVICE);
6060
6061                 while (txb->fragmented) {
6062                         txb->fragmented = false;
6063                         entry = NEXT_TX(entry);
6064                         txb = &tnapi->tx_buffers[entry];
6065                 }
6066         }
6067 }
6068
6069 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6070 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6071                                        struct sk_buff *skb,
6072                                        u32 *entry, u32 *budget,
6073                                        u32 base_flags, u32 mss, u32 vlan)
6074 {
6075         struct tg3 *tp = tnapi->tp;
6076         struct sk_buff *new_skb;
6077         dma_addr_t new_addr = 0;
6078         int ret = 0;
6079
6080         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
6081                 new_skb = skb_copy(skb, GFP_ATOMIC);
6082         } else {
6083                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6084
6085                 new_skb = skb_copy_expand(skb,
6086                                           skb_headroom(skb) + more_headroom,
6087                                           skb_tailroom(skb), GFP_ATOMIC);
6088         }
6089
6090         if (!new_skb) {
6091                 ret = -1;
6092         } else {
6093                 /* New SKB is guaranteed to be linear. */
6094                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6095                                           PCI_DMA_TODEVICE);
6096                 /* Make sure the mapping succeeded */
6097                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6098                         dev_kfree_skb(new_skb);
6099                         ret = -1;
6100                 } else {
6101                         base_flags |= TXD_FLAG_END;
6102
6103                         tnapi->tx_buffers[*entry].skb = new_skb;
6104                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6105                                            mapping, new_addr);
6106
6107                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6108                                             new_skb->len, base_flags,
6109                                             mss, vlan)) {
6110                                 tg3_tx_skb_unmap(tnapi, *entry, 0);
6111                                 dev_kfree_skb(new_skb);
6112                                 ret = -1;
6113                         }
6114                 }
6115         }
6116
6117         dev_kfree_skb(skb);
6118
6119         return ret;
6120 }
6121
6122 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6123
6124 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6125  * TSO header is greater than 80 bytes.
6126  */
6127 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6128 {
6129         struct sk_buff *segs, *nskb;
6130         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6131
6132         /* Estimate the number of fragments in the worst case */
6133         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6134                 netif_stop_queue(tp->dev);
6135
6136                 /* netif_tx_stop_queue() must be done before checking
6137                  * the tx index in tg3_tx_avail() below, because in
6138                  * tg3_tx(), we update tx index before checking for
6139                  * netif_tx_queue_stopped().
6140                  */
6141                 smp_mb();
6142                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6143                         return NETDEV_TX_BUSY;
6144
6145                 netif_wake_queue(tp->dev);
6146         }
6147
6148         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6149         if (IS_ERR(segs))
6150                 goto tg3_tso_bug_end;
6151
6152         do {
6153                 nskb = segs;
6154                 segs = segs->next;
6155                 nskb->next = NULL;
6156                 tg3_start_xmit(nskb, tp->dev);
6157         } while (segs);
6158
6159 tg3_tso_bug_end:
6160         dev_kfree_skb(skb);
6161
6162         return NETDEV_TX_OK;
6163 }
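
/* Illustrative sizing note for the path above (not driver code): a TSO
 * skb with gso_segs = 10 reserves frag_cnt_est = 30 descriptors, i.e.
 * three per resulting segment in the worst case, before the expensive
 * skb_gso_segment() call is attempted.
 */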
6164
6165 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6166  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6167  */
6168 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6169 {
6170         struct tg3 *tp = netdev_priv(dev);
6171         u32 len, entry, base_flags, mss, vlan = 0;
6172         u32 budget;
6173         int i = -1, would_hit_hwbug;
6174         dma_addr_t mapping;
6175         struct tg3_napi *tnapi;
6176         struct netdev_queue *txq;
6177         unsigned int last;
6178
6179         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6180         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6181         if (tg3_flag(tp, ENABLE_TSS))
6182                 tnapi++;
6183
6184         budget = tg3_tx_avail(tnapi);
6185
6186         /* We are running in BH disabled context with netif_tx_lock
6187          * and TX reclaim runs via tp->napi.poll inside of a software
6188          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6189          * no IRQ context deadlocks to worry about either.  Rejoice!
6190          */
6191         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6192                 if (!netif_tx_queue_stopped(txq)) {
6193                         netif_tx_stop_queue(txq);
6194
6195                         /* This is a hard error, log it. */
6196                         netdev_err(dev,
6197                                    "BUG! Tx Ring full when queue awake!\n");
6198                 }
6199                 return NETDEV_TX_BUSY;
6200         }
6201
6202         entry = tnapi->tx_prod;
6203         base_flags = 0;
6204         if (skb->ip_summed == CHECKSUM_PARTIAL)
6205                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6206
6207         mss = skb_shinfo(skb)->gso_size;
6208         if (mss) {
6209                 struct iphdr *iph;
6210                 u32 tcp_opt_len, hdr_len;
6211
6212                 if (skb_header_cloned(skb) &&
6213                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6214                         dev_kfree_skb(skb);
6215                         goto out_unlock;
6216                 }
6217
6218                 iph = ip_hdr(skb);
6219                 tcp_opt_len = tcp_optlen(skb);
6220
6221                 if (skb_is_gso_v6(skb)) {
6222                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6223                 } else {
6224                         u32 ip_tcp_len;
6225
6226                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6227                         hdr_len = ip_tcp_len + tcp_opt_len;
6228
6229                         iph->check = 0;
6230                         iph->tot_len = htons(mss + hdr_len);
6231                 }
6232
6233                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6234                     tg3_flag(tp, TSO_BUG))
6235                         return tg3_tso_bug(tp, skb);
6236
6237                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6238                                TXD_FLAG_CPU_POST_DMA);
6239
6240                 if (tg3_flag(tp, HW_TSO_1) ||
6241                     tg3_flag(tp, HW_TSO_2) ||
6242                     tg3_flag(tp, HW_TSO_3)) {
6243                         tcp_hdr(skb)->check = 0;
6244                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6245                 } else
6246                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6247                                                                  iph->daddr, 0,
6248                                                                  IPPROTO_TCP,
6249                                                                  0);
6250
6251                 if (tg3_flag(tp, HW_TSO_3)) {
6252                         mss |= (hdr_len & 0xc) << 12;
6253                         if (hdr_len & 0x10)
6254                                 base_flags |= 0x00000010;
6255                         base_flags |= (hdr_len & 0x3e0) << 5;
6256                 } else if (tg3_flag(tp, HW_TSO_2))
6257                         mss |= hdr_len << 9;
6258                 else if (tg3_flag(tp, HW_TSO_1) ||
6259                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6260                         if (tcp_opt_len || iph->ihl > 5) {
6261                                 int tsflags;
6262
6263                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6264                                 mss |= (tsflags << 11);
6265                         }
6266                 } else {
6267                         if (tcp_opt_len || iph->ihl > 5) {
6268                                 int tsflags;
6269
6270                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6271                                 base_flags |= tsflags << 12;
6272                         }
6273                 }
6274         }
6275
6276 #ifdef BCM_KERNEL_SUPPORTS_8021Q
6277         if (vlan_tx_tag_present(skb)) {
6278                 base_flags |= TXD_FLAG_VLAN;
6279                 vlan = vlan_tx_tag_get(skb);
6280         }
6281 #endif
6282
6283         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6284             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6285                 base_flags |= TXD_FLAG_JMB_PKT;
6286
6287         len = skb_headlen(skb);
6288
6289         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6290         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6291                 dev_kfree_skb(skb);
6292                 goto out_unlock;
6293         }
6294
6295         tnapi->tx_buffers[entry].skb = skb;
6296         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6297
6298         would_hit_hwbug = 0;
6299
6300         if (tg3_flag(tp, 5701_DMA_BUG))
6301                 would_hit_hwbug = 1;
6302
6303         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6304                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6305                             mss, vlan))
6306                 would_hit_hwbug = 1;
6307
6308         /* Now loop through additional data fragments, and queue them. */
6309         if (skb_shinfo(skb)->nr_frags > 0) {
6310                 u32 tmp_mss = mss;
6311
6312                 if (!tg3_flag(tp, HW_TSO_1) &&
6313                     !tg3_flag(tp, HW_TSO_2) &&
6314                     !tg3_flag(tp, HW_TSO_3))
6315                         tmp_mss = 0;
6316
6317                 last = skb_shinfo(skb)->nr_frags - 1;
6318                 for (i = 0; i <= last; i++) {
6319                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6320
6321                         len = frag->size;
6322                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6323                                                    len, PCI_DMA_TODEVICE);
6324
6325                         tnapi->tx_buffers[entry].skb = NULL;
6326                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6327                                            mapping);
6328                         if (pci_dma_mapping_error(tp->pdev, mapping))
6329                                 goto dma_error;
6330
6331                         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6332                                             len, base_flags |
6333                                             ((i == last) ? TXD_FLAG_END : 0),
6334                                             tmp_mss, vlan))
6335                                 would_hit_hwbug = 1;
6336                 }
6337         }
6338
6339         if (would_hit_hwbug) {
6340                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6341
6342                 /* If the workaround fails due to memory/mapping
6343                  * failure, silently drop this packet.
6344                  */
6345                 entry = tnapi->tx_prod;
6346                 budget = tg3_tx_avail(tnapi);
6347                 if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
6348                                                 base_flags, mss, vlan))
6349                         goto out_unlock;
6350         }
6351
6352         skb_tx_timestamp(skb);
6353
6354         /* Packets are ready, update Tx producer idx local and on card. */
6355         tw32_tx_mbox(tnapi->prodmbox, entry);
6356
6357         tnapi->tx_prod = entry;
6358         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6359                 netif_tx_stop_queue(txq);
6360
6361                 /* netif_tx_stop_queue() must be done before checking
6362                  * the tx index in tg3_tx_avail() below, because in
6363                  * tg3_tx(), we update tx index before checking for
6364                  * netif_tx_queue_stopped().
6365                  */
6366                 smp_mb();
6367                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6368                         netif_tx_wake_queue(txq);
6369         }
6370
6371 out_unlock:
6372         mmiowb();
6373
6374         return NETDEV_TX_OK;
6375
6376 dma_error:
6377         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6378         dev_kfree_skb(skb);
6379         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6380         return NETDEV_TX_OK;
6381 }
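
/* Illustrative summary of the queue flow control above (not driver
 * code): the producer stops the queue when fewer than MAX_SKB_FRAGS + 1
 * descriptors remain, and the completion path in tg3_tx() wakes it once
 * more than TG3_TX_WAKEUP_THRESH(tnapi) descriptors are free again.
 * The smp_mb() pairs with a barrier in tg3_tx() so a wakeup cannot be
 * lost between the availability check and netif_tx_queue_stopped().
 */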
6382
6383 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6384 {
6385         if (enable) {
6386                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6387                                   MAC_MODE_PORT_MODE_MASK);
6388
6389                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6390
6391                 if (!tg3_flag(tp, 5705_PLUS))
6392                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6393
6394                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6395                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6396                 else
6397                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6398         } else {
6399                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6400
6401                 if (tg3_flag(tp, 5705_PLUS) ||
6402                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6403                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6404                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6405         }
6406
6407         tw32(MAC_MODE, tp->mac_mode);
6408         udelay(40);
6409 }
6410
6411 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6412 {
6413         u32 val, bmcr, mac_mode, ptest = 0;
6414
6415         tg3_phy_toggle_apd(tp, false);
6416         tg3_phy_toggle_automdix(tp, 0);
6417
6418         if (extlpbk && tg3_phy_set_extloopbk(tp))
6419                 return -EIO;
6420
6421         bmcr = BMCR_FULLDPLX;
6422         switch (speed) {
6423         case SPEED_10:
6424                 break;
6425         case SPEED_100:
6426                 bmcr |= BMCR_SPEED100;
6427                 break;
6428         case SPEED_1000:
6429         default:
6430                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6431                         speed = SPEED_100;
6432                         bmcr |= BMCR_SPEED100;
6433                 } else {
6434                         speed = SPEED_1000;
6435                         bmcr |= BMCR_SPEED1000;
6436                 }
6437         }
6438
6439         if (extlpbk) {
6440                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6441                         tg3_readphy(tp, MII_CTRL1000, &val);
6442                         val |= CTL1000_AS_MASTER |
6443                                CTL1000_ENABLE_MASTER;
6444                         tg3_writephy(tp, MII_CTRL1000, val);
6445                 } else {
6446                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6447                                 MII_TG3_FET_PTEST_TRIM_2;
6448                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6449                 }
6450         } else
6451                 bmcr |= BMCR_LOOPBACK;
6452
6453         tg3_writephy(tp, MII_BMCR, bmcr);
6454
6455         /* The write needs to be flushed for the FETs */
6456         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6457                 tg3_readphy(tp, MII_BMCR, &bmcr);
6458
6459         udelay(40);
6460
6461         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6462             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6463                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6464                              MII_TG3_FET_PTEST_FRC_TX_LINK |
6465                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
6466
6467                 /* The write needs to be flushed for the AC131 */
6468                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6469         }
6470
6471         /* Reset to prevent losing 1st rx packet intermittently */
6472         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6473             tg3_flag(tp, 5780_CLASS)) {
6474                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6475                 udelay(10);
6476                 tw32_f(MAC_RX_MODE, tp->rx_mode);
6477         }
6478
6479         mac_mode = tp->mac_mode &
6480                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6481         if (speed == SPEED_1000)
6482                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6483         else
6484                 mac_mode |= MAC_MODE_PORT_MODE_MII;
6485
6486         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6487                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6488
6489                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6490                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
6491                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6492                         mac_mode |= MAC_MODE_LINK_POLARITY;
6493
6494                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6495                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6496         }
6497
6498         tw32(MAC_MODE, mac_mode);
6499         udelay(40);
6500
6501         return 0;
6502 }
6503
6504 static void tg3_set_loopback(struct net_device *dev, u32 features)
6505 {
6506         struct tg3 *tp = netdev_priv(dev);
6507
6508         if (features & NETIF_F_LOOPBACK) {
6509                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6510                         return;
6511
6512                 spin_lock_bh(&tp->lock);
6513                 tg3_mac_loopback(tp, true);
6514                 netif_carrier_on(tp->dev);
6515                 spin_unlock_bh(&tp->lock);
6516                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6517         } else {
6518                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6519                         return;
6520
6521                 spin_lock_bh(&tp->lock);
6522                 tg3_mac_loopback(tp, false);
6523                 /* Force link status check */
6524                 tg3_setup_phy(tp, 1);
6525                 spin_unlock_bh(&tp->lock);
6526                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6527         }
6528 }
6529
6530 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6531 {
6532         struct tg3 *tp = netdev_priv(dev);
6533
6534         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6535                 features &= ~NETIF_F_ALL_TSO;
6536
6537         return features;
6538 }
6539
6540 static int tg3_set_features(struct net_device *dev, u32 features)
6541 {
6542         u32 changed = dev->features ^ features;
6543
6544         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6545                 tg3_set_loopback(dev, features);
6546
6547         return 0;
6548 }
6549
6550 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6551                                int new_mtu)
6552 {
6553         dev->mtu = new_mtu;
6554
6555         if (new_mtu > ETH_DATA_LEN) {
6556                 if (tg3_flag(tp, 5780_CLASS)) {
6557                         netdev_update_features(dev);
6558                         tg3_flag_clear(tp, TSO_CAPABLE);
6559                 } else {
6560                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6561                 }
6562         } else {
6563                 if (tg3_flag(tp, 5780_CLASS)) {
6564                         tg3_flag_set(tp, TSO_CAPABLE);
6565                         netdev_update_features(dev);
6566                 }
6567                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6568         }
6569 }
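
/* Illustrative decision table for the helper above (not driver code):
 *
 *	new_mtu <= ETH_DATA_LEN, any chip:    standard ring only
 *	new_mtu >  ETH_DATA_LEN, 5780 class:  TSO capability dropped,
 *	                                      jumbo frames via std ring
 *	new_mtu >  ETH_DATA_LEN, other chips: JUMBO_RING_ENABLE set
 */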
6570
6571 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6572 {
6573         struct tg3 *tp = netdev_priv(dev);
6574         int err;
6575
6576         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6577                 return -EINVAL;
6578
6579         if (!netif_running(dev)) {
6580                 /* We'll just catch it later when the
6581                  * device is brought up.
6582                  */
6583                 tg3_set_mtu(dev, tp, new_mtu);
6584                 return 0;
6585         }
6586
6587         tg3_phy_stop(tp);
6588
6589         tg3_netif_stop(tp);
6590
6591         tg3_full_lock(tp, 1);
6592
6593         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6594
6595         tg3_set_mtu(dev, tp, new_mtu);
6596
6597         err = tg3_restart_hw(tp, 0);
6598
6599         if (!err)
6600                 tg3_netif_start(tp);
6601
6602         tg3_full_unlock(tp);
6603
6604         if (!err)
6605                 tg3_phy_start(tp);
6606
6607         return err;
6608 }
6609
6610 static void tg3_rx_prodring_free(struct tg3 *tp,
6611                                  struct tg3_rx_prodring_set *tpr)
6612 {
6613         int i;
6614
6615         if (tpr != &tp->napi[0].prodring) {
6616                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6617                      i = (i + 1) & tp->rx_std_ring_mask)
6618                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6619                                         tp->rx_pkt_map_sz);
6620
6621                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6622                         for (i = tpr->rx_jmb_cons_idx;
6623                              i != tpr->rx_jmb_prod_idx;
6624                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6625                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6626                                                 TG3_RX_JMB_MAP_SZ);
6627                         }
6628                 }
6629
6630                 return;
6631         }
6632
6633         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6634                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6635                                 tp->rx_pkt_map_sz);
6636
6637         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6638                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6639                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6640                                         TG3_RX_JMB_MAP_SZ);
6641         }
6642 }
6643
6644 /* Initialize rx rings for packet processing.
6645  *
6646  * The chip has been shut down and the driver detached from
6647  * the networking core, so no interrupts or new tx packets will
6648  * end up in the driver.  tp->{tx,}lock are held and thus
6649  * we may not sleep.
6650  */
6651 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6652                                  struct tg3_rx_prodring_set *tpr)
6653 {
6654         u32 i, rx_pkt_dma_sz;
6655
6656         tpr->rx_std_cons_idx = 0;
6657         tpr->rx_std_prod_idx = 0;
6658         tpr->rx_jmb_cons_idx = 0;
6659         tpr->rx_jmb_prod_idx = 0;
6660
6661         if (tpr != &tp->napi[0].prodring) {
6662                 memset(&tpr->rx_std_buffers[0], 0,
6663                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6664                 if (tpr->rx_jmb_buffers)
6665                         memset(&tpr->rx_jmb_buffers[0], 0,
6666                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6667                 goto done;
6668         }
6669
6670         /* Zero out all descriptors. */
6671         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6672
6673         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6674         if (tg3_flag(tp, 5780_CLASS) &&
6675             tp->dev->mtu > ETH_DATA_LEN)
6676                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6677         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6678
6679         /* Initialize invariants of the rings, we only set this
6680          * stuff once.  This works because the card does not
6681          * write into the rx buffer posting rings.
6682          */
6683         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6684                 struct tg3_rx_buffer_desc *rxd;
6685
6686                 rxd = &tpr->rx_std[i];
6687                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6688                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6689                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6690                                (i << RXD_OPAQUE_INDEX_SHIFT));
6691         }
6692
6693         /* Now allocate fresh SKBs for each rx ring. */
6694         for (i = 0; i < tp->rx_pending; i++) {
6695                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6696                         netdev_warn(tp->dev,
6697                                     "Using a smaller RX standard ring. Only "
6698                                     "%d out of %d buffers were allocated "
6699                                     "successfully\n", i, tp->rx_pending);
6700                         if (i == 0)
6701                                 goto initfail;
6702                         tp->rx_pending = i;
6703                         break;
6704                 }
6705         }
6706
6707         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6708                 goto done;
6709
6710         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6711
6712         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6713                 goto done;
6714
6715         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6716                 struct tg3_rx_buffer_desc *rxd;
6717
6718                 rxd = &tpr->rx_jmb[i].std;
6719                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6720                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6721                                   RXD_FLAG_JUMBO;
6722                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6723                        (i << RXD_OPAQUE_INDEX_SHIFT));
6724         }
6725
6726         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6727                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6728                         netdev_warn(tp->dev,
6729                                     "Using a smaller RX jumbo ring. Only %d "
6730                                     "out of %d buffers were allocated "
6731                                     "successfully\n", i, tp->rx_jumbo_pending);
6732                         if (i == 0)
6733                                 goto initfail;
6734                         tp->rx_jumbo_pending = i;
6735                         break;
6736                 }
6737         }
6738
6739 done:
6740         return 0;
6741
6742 initfail:
6743         tg3_rx_prodring_free(tp, tpr);
6744         return -ENOMEM;
6745 }
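
/* Illustrative note on the opaque cookie initialized above (example,
 * not driver code): entry 5 of the standard ring carries
 *
 *	rxd->opaque = RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT);
 *
 * so when the chip hands the descriptor back on a return ring, the rx
 * path can recover both the source ring and the buffer index from a
 * single field.
 */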
6746
6747 static void tg3_rx_prodring_fini(struct tg3 *tp,
6748                                  struct tg3_rx_prodring_set *tpr)
6749 {
6750         kfree(tpr->rx_std_buffers);
6751         tpr->rx_std_buffers = NULL;
6752         kfree(tpr->rx_jmb_buffers);
6753         tpr->rx_jmb_buffers = NULL;
6754         if (tpr->rx_std) {
6755                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6756                                   tpr->rx_std, tpr->rx_std_mapping);
6757                 tpr->rx_std = NULL;
6758         }
6759         if (tpr->rx_jmb) {
6760                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6761                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6762                 tpr->rx_jmb = NULL;
6763         }
6764 }
6765
6766 static int tg3_rx_prodring_init(struct tg3 *tp,
6767                                 struct tg3_rx_prodring_set *tpr)
6768 {
6769         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6770                                       GFP_KERNEL);
6771         if (!tpr->rx_std_buffers)
6772                 return -ENOMEM;
6773
6774         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6775                                          TG3_RX_STD_RING_BYTES(tp),
6776                                          &tpr->rx_std_mapping,
6777                                          GFP_KERNEL);
6778         if (!tpr->rx_std)
6779                 goto err_out;
6780
6781         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6782                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6783                                               GFP_KERNEL);
6784                 if (!tpr->rx_jmb_buffers)
6785                         goto err_out;
6786
6787                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6788                                                  TG3_RX_JMB_RING_BYTES(tp),
6789                                                  &tpr->rx_jmb_mapping,
6790                                                  GFP_KERNEL);
6791                 if (!tpr->rx_jmb)
6792                         goto err_out;
6793         }
6794
6795         return 0;
6796
6797 err_out:
6798         tg3_rx_prodring_fini(tp, tpr);
6799         return -ENOMEM;
6800 }
6801
6802 /* Free up pending packets in all rx/tx rings.
6803  *
6804  * The chip has been shut down and the driver detached from
6805  * the networking core, so no interrupts or new tx packets will
6806  * end up in the driver.  tp->{tx,}lock is not held and we are not
6807  * in an interrupt context and thus may sleep.
6808  */
6809 static void tg3_free_rings(struct tg3 *tp)
6810 {
6811         int i, j;
6812
6813         for (j = 0; j < tp->irq_cnt; j++) {
6814                 struct tg3_napi *tnapi = &tp->napi[j];
6815
6816                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6817
6818                 if (!tnapi->tx_buffers)
6819                         continue;
6820
6821                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
6822                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
6823
6824                         if (!skb)
6825                                 continue;
6826
6827                         tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
6828
6829                         dev_kfree_skb_any(skb);
6830                 }
6831         }
6832 }
6833
6834 /* Initialize tx/rx rings for packet processing.
6835  *
6836  * The chip has been shut down and the driver detached from
6837  * the networking core, so no interrupts or new tx packets will
6838  * end up in the driver.  tp->{tx,}lock are held and thus
6839  * we may not sleep.
6840  */
6841 static int tg3_init_rings(struct tg3 *tp)
6842 {
6843         int i;
6844
6845         /* Free up all the SKBs. */
6846         tg3_free_rings(tp);
6847
6848         for (i = 0; i < tp->irq_cnt; i++) {
6849                 struct tg3_napi *tnapi = &tp->napi[i];
6850
6851                 tnapi->last_tag = 0;
6852                 tnapi->last_irq_tag = 0;
6853                 tnapi->hw_status->status = 0;
6854                 tnapi->hw_status->status_tag = 0;
6855                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6856
6857                 tnapi->tx_prod = 0;
6858                 tnapi->tx_cons = 0;
6859                 if (tnapi->tx_ring)
6860                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6861
6862                 tnapi->rx_rcb_ptr = 0;
6863                 if (tnapi->rx_rcb)
6864                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6865
6866                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6867                         tg3_free_rings(tp);
6868                         return -ENOMEM;
6869                 }
6870         }
6871
6872         return 0;
6873 }
6874
6875 /*
6876  * Must not be invoked with interrupt sources disabled and
6877  * the hardware shut down.
6878  */
6879 static void tg3_free_consistent(struct tg3 *tp)
6880 {
6881         int i;
6882
6883         for (i = 0; i < tp->irq_cnt; i++) {
6884                 struct tg3_napi *tnapi = &tp->napi[i];
6885
6886                 if (tnapi->tx_ring) {
6887                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6888                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6889                         tnapi->tx_ring = NULL;
6890                 }
6891
6892                 kfree(tnapi->tx_buffers);
6893                 tnapi->tx_buffers = NULL;
6894
6895                 if (tnapi->rx_rcb) {
6896                         dma_free_coherent(&tp->pdev->dev,
6897                                           TG3_RX_RCB_RING_BYTES(tp),
6898                                           tnapi->rx_rcb,
6899                                           tnapi->rx_rcb_mapping);
6900                         tnapi->rx_rcb = NULL;
6901                 }
6902
6903                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6904
6905                 if (tnapi->hw_status) {
6906                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6907                                           tnapi->hw_status,
6908                                           tnapi->status_mapping);
6909                         tnapi->hw_status = NULL;
6910                 }
6911         }
6912
6913         if (tp->hw_stats) {
6914                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6915                                   tp->hw_stats, tp->stats_mapping);
6916                 tp->hw_stats = NULL;
6917         }
6918 }
6919
6920 /*
6921  * Must not be invoked with interrupt sources disabled and
6922  * the hardware shut down.  Can sleep.
6923  */
6924 static int tg3_alloc_consistent(struct tg3 *tp)
6925 {
6926         int i;
6927
6928         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6929                                           sizeof(struct tg3_hw_stats),
6930                                           &tp->stats_mapping,
6931                                           GFP_KERNEL);
6932         if (!tp->hw_stats)
6933                 goto err_out;
6934
6935         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6936
6937         for (i = 0; i < tp->irq_cnt; i++) {
6938                 struct tg3_napi *tnapi = &tp->napi[i];
6939                 struct tg3_hw_status *sblk;
6940
6941                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6942                                                       TG3_HW_STATUS_SIZE,
6943                                                       &tnapi->status_mapping,
6944                                                       GFP_KERNEL);
6945                 if (!tnapi->hw_status)
6946                         goto err_out;
6947
6948                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6949                 sblk = tnapi->hw_status;
6950
6951                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6952                         goto err_out;
6953
6954                 /* If multivector TSS is enabled, vector 0 does not handle
6955                  * tx interrupts.  Don't allocate any resources for it.
6956                  */
6957                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6958                     (i && tg3_flag(tp, ENABLE_TSS))) {
6959                         tnapi->tx_buffers = kzalloc(
6960                                                sizeof(struct tg3_tx_ring_info) *
6961                                                TG3_TX_RING_SIZE, GFP_KERNEL);
6962                         if (!tnapi->tx_buffers)
6963                                 goto err_out;
6964
6965                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6966                                                             TG3_TX_RING_BYTES,
6967                                                         &tnapi->tx_desc_mapping,
6968                                                             GFP_KERNEL);
6969                         if (!tnapi->tx_ring)
6970                                 goto err_out;
6971                 }
6972
6973                 /*
6974                  * When RSS is enabled, the status block format changes
6975                  * slightly.  The "rx_jumbo_consumer", "reserved",
6976                  * and "rx_mini_consumer" members get mapped to the
6977                  * other three rx return ring producer indexes.
6978                  */
6979                 switch (i) {
6980                 default:
6981                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6982                         break;
6983                 case 2:
6984                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6985                         break;
6986                 case 3:
6987                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6988                         break;
6989                 case 4:
6990                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6991                         break;
6992                 }
6993
6994                 /*
6995                  * If multivector RSS is enabled, vector 0 does not handle
6996                  * rx or tx interrupts.  Don't allocate any resources for it.
6997                  */
6998                 if (!i && tg3_flag(tp, ENABLE_RSS))
6999                         continue;
7000
7001                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7002                                                    TG3_RX_RCB_RING_BYTES(tp),
7003                                                    &tnapi->rx_rcb_mapping,
7004                                                    GFP_KERNEL);
7005                 if (!tnapi->rx_rcb)
7006                         goto err_out;
7007
7008                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7009         }
7010
7011         return 0;
7012
7013 err_out:
7014         tg3_free_consistent(tp);
7015         return -ENOMEM;
7016 }
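
/* Illustrative mapping established by the switch above (not driver
 * code): with multivector RSS, each vector reads its rx return ring
 * producer index from a different status block member:
 *
 *	vector 1 -> sblk->idx[0].rx_producer
 *	vector 2 -> sblk->rx_jumbo_consumer
 *	vector 3 -> sblk->reserved
 *	vector 4 -> sblk->rx_mini_consumer
 *
 * reusing fields that single-queue chips defined for other purposes.
 */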
7017
7018 #define MAX_WAIT_CNT 1000
7019
7020 /* To stop a block, clear the enable bit and poll till it
7021  * clears.  tp->lock is held.
7022  */
7023 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7024 {
7025         unsigned int i;
7026         u32 val;
7027
7028         if (tg3_flag(tp, 5705_PLUS)) {
7029                 switch (ofs) {
7030                 case RCVLSC_MODE:
7031                 case DMAC_MODE:
7032                 case MBFREE_MODE:
7033                 case BUFMGR_MODE:
7034                 case MEMARB_MODE:
7035                         /* We can't enable/disable these bits on the
7036                          * 5705/5750, so just report success.
7037                          */
7038                         return 0;
7039
7040                 default:
7041                         break;
7042                 }
7043         }
7044
7045         val = tr32(ofs);
7046         val &= ~enable_bit;
7047         tw32_f(ofs, val);
7048
7049         for (i = 0; i < MAX_WAIT_CNT; i++) {
7050                 udelay(100);
7051                 val = tr32(ofs);
7052                 if ((val & enable_bit) == 0)
7053                         break;
7054         }
7055
7056         if (i == MAX_WAIT_CNT && !silent) {
7057                 dev_err(&tp->pdev->dev,
7058                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7059                         ofs, enable_bit);
7060                 return -ENODEV;
7061         }
7062
7063         return 0;
7064 }
7065
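     /* Quiesce the chip ahead of a reset.  The stop order below roughly
      * follows the datapath: receive blocks first, then the send and DMA
      * engines, the MAC transmitter, and finally host coalescing, the
      * buffer manager and the memory arbiter, pulsing the FTQs through a
      * reset along the way.
      */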
7066 /* tp->lock is held. */
7067 static int tg3_abort_hw(struct tg3 *tp, int silent)
7068 {
7069         int i, err;
7070
7071         tg3_disable_ints(tp);
7072
7073         tp->rx_mode &= ~RX_MODE_ENABLE;
7074         tw32_f(MAC_RX_MODE, tp->rx_mode);
7075         udelay(10);
7076
7077         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7078         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7079         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7080         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7081         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7082         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7083
7084         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7085         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7086         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7087         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7088         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7089         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7090         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7091
7092         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7093         tw32_f(MAC_MODE, tp->mac_mode);
7094         udelay(40);
7095
7096         tp->tx_mode &= ~TX_MODE_ENABLE;
7097         tw32_f(MAC_TX_MODE, tp->tx_mode);
7098
7099         for (i = 0; i < MAX_WAIT_CNT; i++) {
7100                 udelay(100);
7101                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7102                         break;
7103         }
7104         if (i >= MAX_WAIT_CNT) {
7105                 dev_err(&tp->pdev->dev,
7106                         "%s timed out, TX_MODE_ENABLE will not clear "
7107                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7108                 err |= -ENODEV;
7109         }
7110
7111         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7112         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7113         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7114
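             /* Pulse a reset through all of the chip's FTQs (flow-through
              * queues): assert every reset bit, then release them.
              */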
7115         tw32(FTQ_RESET, 0xffffffff);
7116         tw32(FTQ_RESET, 0x00000000);
7117
7118         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7119         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7120
7121         for (i = 0; i < tp->irq_cnt; i++) {
7122                 struct tg3_napi *tnapi = &tp->napi[i];
7123                 if (tnapi->hw_status)
7124                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7125         }
7126         if (tp->hw_stats)
7127                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7128
7129         return err;
7130 }
7131
7132 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
7133 {
7134         int i;
7135         u32 apedata;
7136
7137         /* NCSI does not support APE events */
7138         if (tg3_flag(tp, APE_HAS_NCSI))
7139                 return;
7140
7141         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
7142         if (apedata != APE_SEG_SIG_MAGIC)
7143                 return;
7144
7145         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
7146         if (!(apedata & APE_FW_STATUS_READY))
7147                 return;
7148
7149         /* Wait for up to 1 millisecond for APE to service previous event. */
7150         for (i = 0; i < 10; i++) {
7151                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
7152                         return;
7153
7154                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
7155
7156                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7157                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
7158                                         event | APE_EVENT_STATUS_EVENT_PENDING);
7159
7160                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
7161
7162                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7163                         break;
7164
7165                 udelay(100);
7166         }
7167
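             /* If the event was posted above, kick the APE event register
              * so the firmware notices the pending work.
              */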
7168         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7169                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
7170 }
7171
7172 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
7173 {
7174         u32 event;
7175         u32 apedata;
7176
7177         if (!tg3_flag(tp, ENABLE_APE))
7178                 return;
7179
7180         switch (kind) {
7181         case RESET_KIND_INIT:
7182                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
7183                                 APE_HOST_SEG_SIG_MAGIC);
7184                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
7185                                 APE_HOST_SEG_LEN_MAGIC);
7186                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
7187                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
7188                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
7189                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
7190                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
7191                                 APE_HOST_BEHAV_NO_PHYLOCK);
7192                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
7193                                     TG3_APE_HOST_DRVR_STATE_START);
7194
7195                 event = APE_EVENT_STATUS_STATE_START;
7196                 break;
7197         case RESET_KIND_SHUTDOWN:
7198                 /* With the interface we are currently using,
7199                  * APE does not track driver state.  Wiping
7200                  * out the HOST SEGMENT SIGNATURE forces
7201                  * the APE to assume OS-absent status.
7202                  */
7203                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
7204
7205                 if (device_may_wakeup(&tp->pdev->dev) &&
7206                     tg3_flag(tp, WOL_ENABLE)) {
7207                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7208                                             TG3_APE_HOST_WOL_SPEED_AUTO);
7209                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7210                 } else
7211                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7212
7213                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7214
7215                 event = APE_EVENT_STATUS_STATE_UNLOAD;
7216                 break;
7217         case RESET_KIND_SUSPEND:
7218                 event = APE_EVENT_STATUS_STATE_SUSPEND;
7219                 break;
7220         default:
7221                 return;
7222         }
7223
7224         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7225
7226         tg3_ape_send_event(tp, event);
7227 }
7228
7229 /* tp->lock is held. */
7230 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7231 {
7232         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7233                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7234
7235         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7236                 switch (kind) {
7237                 case RESET_KIND_INIT:
7238                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7239                                       DRV_STATE_START);
7240                         break;
7241
7242                 case RESET_KIND_SHUTDOWN:
7243                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7244                                       DRV_STATE_UNLOAD);
7245                         break;
7246
7247                 case RESET_KIND_SUSPEND:
7248                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7249                                       DRV_STATE_SUSPEND);
7250                         break;
7251
7252                 default:
7253                         break;
7254                 }
7255         }
7256
7257         if (kind == RESET_KIND_INIT ||
7258             kind == RESET_KIND_SUSPEND)
7259                 tg3_ape_driver_state_change(tp, kind);
7260 }
7261
7262 /* tp->lock is held. */
7263 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7264 {
7265         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7266                 switch (kind) {
7267                 case RESET_KIND_INIT:
7268                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7269                                       DRV_STATE_START_DONE);
7270                         break;
7271
7272                 case RESET_KIND_SHUTDOWN:
7273                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7274                                       DRV_STATE_UNLOAD_DONE);
7275                         break;
7276
7277                 default:
7278                         break;
7279                 }
7280         }
7281
7282         if (kind == RESET_KIND_SHUTDOWN)
7283                 tg3_ape_driver_state_change(tp, kind);
7284 }
7285
7286 /* tp->lock is held. */
7287 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7288 {
7289         if (tg3_flag(tp, ENABLE_ASF)) {
7290                 switch (kind) {
7291                 case RESET_KIND_INIT:
7292                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7293                                       DRV_STATE_START);
7294                         break;
7295
7296                 case RESET_KIND_SHUTDOWN:
7297                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7298                                       DRV_STATE_UNLOAD);
7299                         break;
7300
7301                 case RESET_KIND_SUSPEND:
7302                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7303                                       DRV_STATE_SUSPEND);
7304                         break;
7305
7306                 default:
7307                         break;
7308                 }
7309         }
7310 }
7311
7312 static int tg3_poll_fw(struct tg3 *tp)
7313 {
7314         int i;
7315         u32 val;
7316
7317         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7318                 /* Wait up to 20ms for init done. */
7319                 for (i = 0; i < 200; i++) {
7320                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7321                                 return 0;
7322                         udelay(100);
7323                 }
7324                 return -ENODEV;
7325         }
7326
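             /* tg3_write_sig_pre_reset() placed NIC_SRAM_FIRMWARE_MBOX_MAGIC1
              * in this mailbox; bootcode acknowledges completion by writing
              * back its one's complement.  100000 polls of 10 usec each
              * bound the wait at about one second.
              */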
7327         /* Wait for firmware initialization to complete. */
7328         for (i = 0; i < 100000; i++) {
7329                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7330                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7331                         break;
7332                 udelay(10);
7333         }
7334
7335         /* Chip might not be fitted with firmware.  Some Sun onboard
7336          * parts are configured like that.  So don't signal the timeout
7337          * of the above loop as an error, but do report the lack of
7338          * running firmware once.
7339          */
7340         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7341                 tg3_flag_set(tp, NO_FWARE_REPORTED);
7342
7343                 netdev_info(tp->dev, "No firmware running\n");
7344         }
7345
7346         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7347                 /* The 57765 A0 needs a little more
7348                  * time to do some important work.
7349                  */
7350                 mdelay(10);
7351         }
7352
7353         return 0;
7354 }
7355
7356 /* Save PCI command register before chip reset */
7357 static void tg3_save_pci_state(struct tg3 *tp)
7358 {
7359         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7360 }
7361
7362 /* Restore PCI state after chip reset */
7363 static void tg3_restore_pci_state(struct tg3 *tp)
7364 {
7365         u32 val;
7366
7367         /* Re-enable indirect register accesses. */
7368         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7369                                tp->misc_host_ctrl);
7370
7371         /* Set MAX PCI retry to zero. */
7372         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7373         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7374             tg3_flag(tp, PCIX_MODE))
7375                 val |= PCISTATE_RETRY_SAME_DMA;
7376         /* Allow reads and writes to the APE register and memory space. */
7377         if (tg3_flag(tp, ENABLE_APE))
7378                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7379                        PCISTATE_ALLOW_APE_SHMEM_WR |
7380                        PCISTATE_ALLOW_APE_PSPACE_WR;
7381         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7382
7383         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7384
7385         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7386                 if (tg3_flag(tp, PCI_EXPRESS))
7387                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7388                 else {
7389                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7390                                               tp->pci_cacheline_sz);
7391                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7392                                               tp->pci_lat_timer);
7393                 }
7394         }
7395
7396         /* Make sure the PCI-X relaxed ordering bit is clear. */
7397         if (tg3_flag(tp, PCIX_MODE)) {
7398                 u16 pcix_cmd;
7399
7400                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7401                                      &pcix_cmd);
7402                 pcix_cmd &= ~PCI_X_CMD_ERO;
7403                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7404                                       pcix_cmd);
7405         }
7406
7407         if (tg3_flag(tp, 5780_CLASS)) {
7408
7409                 /* Chip reset on 5780 will reset the MSI enable bit,
7410                  * so we need to restore it.
7411                  */
7412                 if (tg3_flag(tp, USING_MSI)) {
7413                         u16 ctrl;
7414
7415                         pci_read_config_word(tp->pdev,
7416                                              tp->msi_cap + PCI_MSI_FLAGS,
7417                                              &ctrl);
7418                         pci_write_config_word(tp->pdev,
7419                                               tp->msi_cap + PCI_MSI_FLAGS,
7420                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7421                         val = tr32(MSGINT_MODE);
7422                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7423                 }
7424         }
7425 }
7426
7427 static void tg3_stop_fw(struct tg3 *);
7428
7429 /* tp->lock is held. */
7430 static int tg3_chip_reset(struct tg3 *tp)
7431 {
7432         u32 val;
7433         void (*write_op)(struct tg3 *, u32, u32);
7434         int i, err;
7435
7436         tg3_nvram_lock(tp);
7437
7438         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7439
7440         /* No matching tg3_nvram_unlock() after this because
7441          * chip reset below will undo the nvram lock.
7442          */
7443         tp->nvram_lock_cnt = 0;
7444
7445         /* GRC_MISC_CFG core clock reset will clear the memory
7446          * enable bit in PCI register 4 and the MSI enable bit
7447          * on some chips, so we save relevant registers here.
7448          */
7449         tg3_save_pci_state(tp);
7450
7451         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7452             tg3_flag(tp, 5755_PLUS))
7453                 tw32(GRC_FASTBOOT_PC, 0);
7454
7455         /*
7456          * We must avoid the readl() that normally takes place.
7457          * It locks machines, causes machine checks, and other
7458          * fun things.  So, temporarily disable the 5701
7459          * hardware workaround, while we do the reset.
7460          */
7461         write_op = tp->write32;
7462         if (write_op == tg3_write_flush_reg32)
7463                 tp->write32 = tg3_write32;
7464
7465         /* Prevent the irq handler from reading or writing PCI registers
7466          * during chip reset when the memory enable bit in the PCI command
7467          * register may be cleared.  The chip does not generate interrupt
7468          * at this time, but the irq handler may still be called due to irq
7469          * sharing or irqpoll.
7470          */
7471         tg3_flag_set(tp, CHIP_RESETTING);
7472         for (i = 0; i < tp->irq_cnt; i++) {
7473                 struct tg3_napi *tnapi = &tp->napi[i];
7474                 if (tnapi->hw_status) {
7475                         tnapi->hw_status->status = 0;
7476                         tnapi->hw_status->status_tag = 0;
7477                 }
7478                 tnapi->last_tag = 0;
7479                 tnapi->last_irq_tag = 0;
7480         }
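             /* Ensure the zeroed status words and tags above are visible
              * to the irq handlers before they are synchronized below.
              */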
7481         smp_mb();
7482
7483         for (i = 0; i < tp->irq_cnt; i++)
7484                 synchronize_irq(tp->napi[i].irq_vec);
7485
7486         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7487                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7488                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7489         }
7490
7491         /* do the reset */
7492         val = GRC_MISC_CFG_CORECLK_RESET;
7493
7494         if (tg3_flag(tp, PCI_EXPRESS)) {
7495                 /* Force PCIe 1.0a mode */
7496                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7497                     !tg3_flag(tp, 57765_PLUS) &&
7498                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7499                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7500                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7501
7502                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7503                         tw32(GRC_MISC_CFG, (1 << 29));
7504                         val |= (1 << 29);
7505                 }
7506         }
7507
7508         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7509                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7510                 tw32(GRC_VCPU_EXT_CTRL,
7511                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7512         }
7513
7514         /* Manage gphy power for all CPMU-absent PCIe devices. */
7515         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7516                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7517
7518         tw32(GRC_MISC_CFG, val);
7519
7520         /* restore 5701 hardware bug workaround write method */
7521         tp->write32 = write_op;
7522
7523         /* Unfortunately, we have to delay before the PCI read back.
7524          * Some 575X chips will not even respond to a PCI cfg access
7525          * when the reset command is given to the chip.
7526          *
7527          * How do these hardware designers expect things to work
7528          * properly if the PCI write is posted for a long period
7529          * of time?  Some method is always needed to read a register
7530          * back in order to push out the posted write that performs
7531          * the reset.
7532          *
7533          * For most tg3 variants the trick below was working.
7534          * Ho hum...
7535          */
7536         udelay(120);
7537
7538         /* Flush PCI posted writes.  The normal MMIO registers
7539          * are inaccessible at this time so this is the only
7540          * way to do this reliably (actually, this is no longer
7541          * the case, see above).  I tried to use indirect
7542          * register read/write but this upset some 5701 variants.
7543          */
7544         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7545
7546         udelay(120);
7547
7548         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7549                 u16 val16;
7550
7551                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7552                         int i;
7553                         u32 cfg_val;
7554
7555                         /* Wait for link training to complete.  */
7556                         for (i = 0; i < 5000; i++)
7557                                 udelay(100);
7558
7559                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7560                         pci_write_config_dword(tp->pdev, 0xc4,
7561                                                cfg_val | (1 << 15));
7562                 }
7563
7564                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7565                 pci_read_config_word(tp->pdev,
7566                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7567                                      &val16);
7568                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7569                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7570                 /*
7571                  * Older PCIe devices only support the 128 byte
7572                  * MPS setting.  Enforce the restriction.
7573                  */
7574                 if (!tg3_flag(tp, CPMU_PRESENT))
7575                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7576                 pci_write_config_word(tp->pdev,
7577                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7578                                       val16);
7579
7580                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7581
7582                 /* Clear error status */
7583                 pci_write_config_word(tp->pdev,
7584                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7585                                       PCI_EXP_DEVSTA_CED |
7586                                       PCI_EXP_DEVSTA_NFED |
7587                                       PCI_EXP_DEVSTA_FED |
7588                                       PCI_EXP_DEVSTA_URD);
7589         }
7590
7591         tg3_restore_pci_state(tp);
7592
7593         tg3_flag_clear(tp, CHIP_RESETTING);
7594         tg3_flag_clear(tp, ERROR_PROCESSED);
7595
7596         val = 0;
7597         if (tg3_flag(tp, 5780_CLASS))
7598                 val = tr32(MEMARB_MODE);
7599         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7600
7601         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7602                 tg3_stop_fw(tp);
7603                 tw32(0x5000, 0x400);
7604         }
7605
7606         tw32(GRC_MODE, tp->grc_mode);
7607
7608         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7609                 val = tr32(0xc4);
7610
7611                 tw32(0xc4, val | (1 << 15));
7612         }
7613
7614         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7615             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7616                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7617                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7618                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7619                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7620         }
7621
7622         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7623                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7624                 val = tp->mac_mode;
7625         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7626                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7627                 val = tp->mac_mode;
7628         } else
7629                 val = 0;
7630
7631         tw32_f(MAC_MODE, val);
7632         udelay(40);
7633
7634         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7635
7636         err = tg3_poll_fw(tp);
7637         if (err)
7638                 return err;
7639
7640         tg3_mdio_start(tp);
7641
7642         if (tg3_flag(tp, PCI_EXPRESS) &&
7643             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7644             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7645             !tg3_flag(tp, 57765_PLUS)) {
7646                 val = tr32(0x7c00);
7647
7648                 tw32(0x7c00, val | (1 << 25));
7649         }
7650
7651         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7652                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7653                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7654         }
7655
7656         /* Reprobe ASF enable state.  */
7657         tg3_flag_clear(tp, ENABLE_ASF);
7658         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7659         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7660         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7661                 u32 nic_cfg;
7662
7663                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7664                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7665                         tg3_flag_set(tp, ENABLE_ASF);
7666                         tp->last_event_jiffies = jiffies;
7667                         if (tg3_flag(tp, 5750_PLUS))
7668                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7669                 }
7670         }
7671
7672         return 0;
7673 }
7674
7675 /* tp->lock is held. */
7676 static void tg3_stop_fw(struct tg3 *tp)
7677 {
7678         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7679                 /* Wait for RX cpu to ACK the previous event. */
7680                 tg3_wait_for_event_ack(tp);
7681
7682                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7683
7684                 tg3_generate_fw_event(tp);
7685
7686                 /* Wait for RX cpu to ACK this event. */
7687                 tg3_wait_for_event_ack(tp);
7688         }
7689 }
7690
7691 /* tp->lock is held. */
7692 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7693 {
7694         int err;
7695
7696         tg3_stop_fw(tp);
7697
7698         tg3_write_sig_pre_reset(tp, kind);
7699
7700         tg3_abort_hw(tp, silent);
7701         err = tg3_chip_reset(tp);
7702
7703         __tg3_set_mac_addr(tp, 0);
7704
7705         tg3_write_sig_legacy(tp, kind);
7706         tg3_write_sig_post_reset(tp, kind);
7707
7708         if (err)
7709                 return err;
7710
7711         return 0;
7712 }
7713
7714 #define RX_CPU_SCRATCH_BASE     0x30000
7715 #define RX_CPU_SCRATCH_SIZE     0x04000
7716 #define TX_CPU_SCRATCH_BASE     0x34000
7717 #define TX_CPU_SCRATCH_SIZE     0x04000
7718
7719 /* tp->lock is held. */
7720 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7721 {
7722         int i;
7723
7724         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7725
7726         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7727                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7728
7729                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7730                 return 0;
7731         }
7732         if (offset == RX_CPU_BASE) {
7733                 for (i = 0; i < 10000; i++) {
7734                         tw32(offset + CPU_STATE, 0xffffffff);
7735                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7736                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7737                                 break;
7738                 }
7739
7740                 tw32(offset + CPU_STATE, 0xffffffff);
7741                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7742                 udelay(10);
7743         } else {
7744                 for (i = 0; i < 10000; i++) {
7745                         tw32(offset + CPU_STATE, 0xffffffff);
7746                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7747                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7748                                 break;
7749                 }
7750         }
7751
7752         if (i >= 10000) {
7753                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7754                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7755                 return -ENODEV;
7756         }
7757
7758         /* Clear firmware's nvram arbitration. */
7759         if (tg3_flag(tp, NVRAM))
7760                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7761         return 0;
7762 }
7763
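     /* Describes one firmware image: fw_base is the NIC-internal address
      * the image is linked to run at, fw_len its payload size in bytes,
      * and fw_data the big-endian words of the blob itself.
      */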
7764 struct fw_info {
7765         unsigned int fw_base;
7766         unsigned int fw_len;
7767         const __be32 *fw_data;
7768 };
7769
7770 /* tp->lock is held. */
7771 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7772                                  int cpu_scratch_size, struct fw_info *info)
7773 {
7774         int err, lock_err, i;
7775         void (*write_op)(struct tg3 *, u32, u32);
7776
7777         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7778                 netdev_err(tp->dev,
7779                            "%s: Trying to load TX cpu firmware on a 5705-plus chip\n",
7780                            __func__);
7781                 return -EINVAL;
7782         }
7783
7784         if (tg3_flag(tp, 5705_PLUS))
7785                 write_op = tg3_write_mem;
7786         else
7787                 write_op = tg3_write_indirect_reg32;
7788
7789         /* It is possible that bootcode is still loading at this point.
7790          * Get the nvram lock before halting the cpu.
7791          */
7792         lock_err = tg3_nvram_lock(tp);
7793         err = tg3_halt_cpu(tp, cpu_base);
7794         if (!lock_err)
7795                 tg3_nvram_unlock(tp);
7796         if (err)
7797                 goto out;
7798
7799         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7800                 write_op(tp, cpu_scratch_base + i, 0);
7801         tw32(cpu_base + CPU_STATE, 0xffffffff);
7802         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
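             /* Copy the image into the CPU's scratch memory; the low 16
              * bits of fw_base locate the image within the scratch window.
              */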
7803         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7804                 write_op(tp, (cpu_scratch_base +
7805                               (info->fw_base & 0xffff) +
7806                               (i * sizeof(u32))),
7807                               be32_to_cpu(info->fw_data[i]));
7808
7809         err = 0;
7810
7811 out:
7812         return err;
7813 }
7814
7815 /* tp->lock is held. */
7816 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7817 {
7818         struct fw_info info;
7819         const __be32 *fw_data;
7820         int err, i;
7821
7822         fw_data = (void *)tp->fw->data;
7823
7824         /* The firmware blob starts with the version numbers, followed by
7825            the start address and length.  We use the complete length:
7826            length = end_address_of_bss - start_address_of_text.
7827            The remainder is the image, to be loaded contiguously
7828            from the start address. */
7829
7830         info.fw_base = be32_to_cpu(fw_data[1]);
7831         info.fw_len = tp->fw->size - 12;
7832         info.fw_data = &fw_data[3];
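             /* That is: fw_data[0] holds the version, fw_data[1] the start
              * address and fw_data[2] the stated length; the 12-byte header
              * is skipped, and the load length is derived from the file
              * size rather than taken from fw_data[2].
              */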
7833
7834         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7835                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7836                                     &info);
7837         if (err)
7838                 return err;
7839
7840         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7841                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7842                                     &info);
7843         if (err)
7844                 return err;
7845
7846         /* Now start up only the RX cpu. */
7847         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7848         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7849
7850         for (i = 0; i < 5; i++) {
7851                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7852                         break;
7853                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7854                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7855                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7856                 udelay(1000);
7857         }
7858         if (i >= 5) {
7859                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7860                            "should be %08x\n", __func__,
7861                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7862                 return -ENODEV;
7863         }
7864         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7865         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7866
7867         return 0;
7868 }
7869
7870 /* tp->lock is held. */
7871 static int tg3_load_tso_firmware(struct tg3 *tp)
7872 {
7873         struct fw_info info;
7874         const __be32 *fw_data;
7875         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7876         int err, i;
7877
7878         if (tg3_flag(tp, HW_TSO_1) ||
7879             tg3_flag(tp, HW_TSO_2) ||
7880             tg3_flag(tp, HW_TSO_3))
7881                 return 0;
7882
7883         fw_data = (void *)tp->fw->data;
7884
7885         /* The firmware blob starts with the version numbers, followed by
7886            the start address and length.  We use the complete length:
7887            length = end_address_of_bss - start_address_of_text.
7888            The remainder is the image, to be loaded contiguously
7889            from the start address. */
7890
7891         info.fw_base = be32_to_cpu(fw_data[1]);
7892         cpu_scratch_size = tp->fw_len;
7893         info.fw_len = tp->fw->size - 12;
7894         info.fw_data = &fw_data[3];
7895
7896         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7897                 cpu_base = RX_CPU_BASE;
7898                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7899         } else {
7900                 cpu_base = TX_CPU_BASE;
7901                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7902                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7903         }
7904
7905         err = tg3_load_firmware_cpu(tp, cpu_base,
7906                                     cpu_scratch_base, cpu_scratch_size,
7907                                     &info);
7908         if (err)
7909                 return err;
7910
7911         /* Now start up the cpu. */
7912         tw32(cpu_base + CPU_STATE, 0xffffffff);
7913         tw32_f(cpu_base + CPU_PC, info.fw_base);
7914
7915         for (i = 0; i < 5; i++) {
7916                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7917                         break;
7918                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7919                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7920                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7921                 udelay(1000);
7922         }
7923         if (i >= 5) {
7924                 netdev_err(tp->dev,
7925                            "%s fails to set CPU PC, is %08x should be %08x\n",
7926                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7927                 return -ENODEV;
7928         }
7929         tw32(cpu_base + CPU_STATE, 0xffffffff);
7930         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7931         return 0;
7932 }
7933
7935 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7936 {
7937         struct tg3 *tp = netdev_priv(dev);
7938         struct sockaddr *addr = p;
7939         int err = 0, skip_mac_1 = 0;
7940
7941         if (!is_valid_ether_addr(addr->sa_data))
7942                 return -EINVAL;
7943
7944         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7945
7946         if (!netif_running(dev))
7947                 return 0;
7948
7949         if (tg3_flag(tp, ENABLE_ASF)) {
7950                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7951
7952                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7953                 addr0_low = tr32(MAC_ADDR_0_LOW);
7954                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7955                 addr1_low = tr32(MAC_ADDR_1_LOW);
7956
7957                 /* Skip MAC addr 1 if ASF is using it. */
7958                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7959                     !(addr1_high == 0 && addr1_low == 0))
7960                         skip_mac_1 = 1;
7961         }
7962         spin_lock_bh(&tp->lock);
7963         __tg3_set_mac_addr(tp, skip_mac_1);
7964         spin_unlock_bh(&tp->lock);
7965
7966         return err;
7967 }
7968
7969 /* tp->lock is held. */
7970 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7971                            dma_addr_t mapping, u32 maxlen_flags,
7972                            u32 nic_addr)
7973 {
7974         tg3_write_mem(tp,
7975                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7976                       ((u64) mapping >> 32));
7977         tg3_write_mem(tp,
7978                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7979                       ((u64) mapping & 0xffffffff));
7980         tg3_write_mem(tp,
7981                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7982                        maxlen_flags);
7983
7984         if (!tg3_flag(tp, 5705_PLUS))
7985                 tg3_write_mem(tp,
7986                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7987                               nic_addr);
7988 }
7989
7990 static void __tg3_set_rx_mode(struct net_device *);
7991 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7992 {
7993         int i;
7994
7995         if (!tg3_flag(tp, ENABLE_TSS)) {
7996                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7997                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7998                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7999         } else {
8000                 tw32(HOSTCC_TXCOL_TICKS, 0);
8001                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8002                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8003         }
8004
8005         if (!tg3_flag(tp, ENABLE_RSS)) {
8006                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8007                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8008                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8009         } else {
8010                 tw32(HOSTCC_RXCOL_TICKS, 0);
8011                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8012                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8013         }
8014
8015         if (!tg3_flag(tp, 5705_PLUS)) {
8016                 u32 val = ec->stats_block_coalesce_usecs;
8017
8018                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8019                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8020
8021                 if (!netif_carrier_ok(tp->dev))
8022                         val = 0;
8023
8024                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8025         }
8026
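             /* Per-vector coalescing registers are laid out in 0x18-byte
              * strides starting at the *_VEC1 addresses.  Program the
              * active vectors here; the loop that follows zeroes the
              * unused slots up to irq_max.
              */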
8027         for (i = 0; i < tp->irq_cnt - 1; i++) {
8028                 u32 reg;
8029
8030                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8031                 tw32(reg, ec->rx_coalesce_usecs);
8032                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8033                 tw32(reg, ec->rx_max_coalesced_frames);
8034                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8035                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8036
8037                 if (tg3_flag(tp, ENABLE_TSS)) {
8038                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8039                         tw32(reg, ec->tx_coalesce_usecs);
8040                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8041                         tw32(reg, ec->tx_max_coalesced_frames);
8042                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8043                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8044                 }
8045         }
8046
8047         for (; i < tp->irq_max - 1; i++) {
8048                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8049                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8050                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8051
8052                 if (tg3_flag(tp, ENABLE_TSS)) {
8053                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8054                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8055                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8056                 }
8057         }
8058 }
8059
8060 /* tp->lock is held. */
8061 static void tg3_rings_reset(struct tg3 *tp)
8062 {
8063         int i;
8064         u32 stblk, txrcb, rxrcb, limit;
8065         struct tg3_napi *tnapi = &tp->napi[0];
8066
8067         /* Disable all transmit rings but the first. */
8068         if (!tg3_flag(tp, 5705_PLUS))
8069                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8070         else if (tg3_flag(tp, 5717_PLUS))
8071                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8072         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8073                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8074         else
8075                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8076
8077         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8078              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8079                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8080                               BDINFO_FLAGS_DISABLED);
8081
8083         /* Disable all receive return rings but the first. */
8084         if (tg3_flag(tp, 5717_PLUS))
8085                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8086         else if (!tg3_flag(tp, 5705_PLUS))
8087                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8088         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8089                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8090                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8091         else
8092                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8093
8094         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8095              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8096                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8097                               BDINFO_FLAGS_DISABLED);
8098
8099         /* Disable interrupts */
8100         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8101         tp->napi[0].chk_msi_cnt = 0;
8102         tp->napi[0].last_rx_cons = 0;
8103         tp->napi[0].last_tx_cons = 0;
8104
8105         /* Zero mailbox registers. */
8106         if (tg3_flag(tp, SUPPORT_MSIX)) {
8107                 for (i = 1; i < tp->irq_max; i++) {
8108                         tp->napi[i].tx_prod = 0;
8109                         tp->napi[i].tx_cons = 0;
8110                         if (tg3_flag(tp, ENABLE_TSS))
8111                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8112                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8113                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8114                         tp->napi[i].chk_msi_cnt = 0;
8115                         tp->napi[i].last_rx_cons = 0;
8116                         tp->napi[i].last_tx_cons = 0;
8117                 }
8118                 if (!tg3_flag(tp, ENABLE_TSS))
8119                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8120         } else {
8121                 tp->napi[0].tx_prod = 0;
8122                 tp->napi[0].tx_cons = 0;
8123                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8124                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8125         }
8126
8127         /* Make sure the NIC-based send BD rings are disabled. */
8128         if (!tg3_flag(tp, 5705_PLUS)) {
8129                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8130                 for (i = 0; i < 16; i++)
8131                         tw32_tx_mbox(mbox + i * 8, 0);
8132         }
8133
8134         txrcb = NIC_SRAM_SEND_RCB;
8135         rxrcb = NIC_SRAM_RCV_RET_RCB;
8136
8137         /* Clear status block in ram. */
8138         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8139
8140         /* Set status block DMA address */
8141         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8142              ((u64) tnapi->status_mapping >> 32));
8143         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8144              ((u64) tnapi->status_mapping & 0xffffffff));
8145
8146         if (tnapi->tx_ring) {
8147                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8148                                (TG3_TX_RING_SIZE <<
8149                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8150                                NIC_SRAM_TX_BUFFER_DESC);
8151                 txrcb += TG3_BDINFO_SIZE;
8152         }
8153
8154         if (tnapi->rx_rcb) {
8155                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8156                                (tp->rx_ret_ring_mask + 1) <<
8157                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8158                 rxrcb += TG3_BDINFO_SIZE;
8159         }
8160
8161         stblk = HOSTCC_STATBLCK_RING1;
8162
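             /* Status block addresses for the remaining vectors are
              * programmed in 8-byte (high/low address word) strides
              * starting at HOSTCC_STATBLCK_RING1.
              */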
8163         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8164                 u64 mapping = (u64)tnapi->status_mapping;
8165                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8166                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8167
8168                 /* Clear status block in ram. */
8169                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8170
8171                 if (tnapi->tx_ring) {
8172                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8173                                        (TG3_TX_RING_SIZE <<
8174                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8175                                        NIC_SRAM_TX_BUFFER_DESC);
8176                         txrcb += TG3_BDINFO_SIZE;
8177                 }
8178
8179                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8180                                ((tp->rx_ret_ring_mask + 1) <<
8181                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8182
8183                 stblk += 8;
8184                 rxrcb += TG3_BDINFO_SIZE;
8185         }
8186 }
8187
8188 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8189 {
8190         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8191
8192         if (!tg3_flag(tp, 5750_PLUS) ||
8193             tg3_flag(tp, 5780_CLASS) ||
8194             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8195             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8196                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8197         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8198                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8199                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8200         else
8201                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8202
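             /* The standard ring replenish threshold is the smaller of
              * half the on-chip BD cache and one eighth of the host ring
              * (but at least one BD), presumably so that neither side is
              * allowed to run dry.
              */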
8203         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8204         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8205
8206         val = min(nic_rep_thresh, host_rep_thresh);
8207         tw32(RCVBDI_STD_THRESH, val);
8208
8209         if (tg3_flag(tp, 57765_PLUS))
8210                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8211
8212         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8213                 return;
8214
8215         if (!tg3_flag(tp, 5705_PLUS))
8216                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8217         else
8218                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8219
8220         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8221
8222         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8223         tw32(RCVBDI_JUMBO_THRESH, val);
8224
8225         if (tg3_flag(tp, 57765_PLUS))
8226                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8227 }
8228
8229 /* tp->lock is held. */
8230 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8231 {
8232         u32 val, rdmac_mode;
8233         int i, err, limit;
8234         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8235
8236         tg3_disable_ints(tp);
8237
8238         tg3_stop_fw(tp);
8239
8240         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8241
8242         if (tg3_flag(tp, INIT_COMPLETE))
8243                 tg3_abort_hw(tp, 1);
8244
8245         /* Enable MAC control of LPI */
8246         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8247                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8248                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8249                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8250
8251                 tw32_f(TG3_CPMU_EEE_CTRL,
8252                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8253
8254                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8255                       TG3_CPMU_EEEMD_LPI_IN_TX |
8256                       TG3_CPMU_EEEMD_LPI_IN_RX |
8257                       TG3_CPMU_EEEMD_EEE_ENABLE;
8258
8259                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8260                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8261
8262                 if (tg3_flag(tp, ENABLE_APE))
8263                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8264
8265                 tw32_f(TG3_CPMU_EEE_MODE, val);
8266
8267                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8268                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8269                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8270
8271                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8272                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8273                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8274         }
8275
8276         if (reset_phy)
8277                 tg3_phy_reset(tp);
8278
8279         err = tg3_chip_reset(tp);
8280         if (err)
8281                 return err;
8282
8283         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8284
8285         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8286                 val = tr32(TG3_CPMU_CTRL);
8287                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8288                 tw32(TG3_CPMU_CTRL, val);
8289
8290                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8291                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8292                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8293                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8294
8295                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8296                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8297                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8298                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8299
8300                 val = tr32(TG3_CPMU_HST_ACC);
8301                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8302                 val |= CPMU_HST_ACC_MACCLK_6_25;
8303                 tw32(TG3_CPMU_HST_ACC, val);
8304         }
8305
8306         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8307                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8308                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8309                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8310                 tw32(PCIE_PWR_MGMT_THRESH, val);
8311
8312                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8313                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8314
8315                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8316
8317                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8318                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8319         }
8320
8321         if (tg3_flag(tp, L1PLLPD_EN)) {
8322                 u32 grc_mode = tr32(GRC_MODE);
8323
8324                 /* Access the lower 1K of PL PCIE block registers. */
8325                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8326                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8327
8328                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8329                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8330                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8331
8332                 tw32(GRC_MODE, grc_mode);
8333         }
8334
8335         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8336                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8337                         u32 grc_mode = tr32(GRC_MODE);
8338
8339                         /* Access the lower 1K of PL PCIE block registers. */
8340                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8341                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8342
8343                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8344                                    TG3_PCIE_PL_LO_PHYCTL5);
8345                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8346                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8347
8348                         tw32(GRC_MODE, grc_mode);
8349                 }
8350
8351                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8352                         u32 grc_mode = tr32(GRC_MODE);
8353
8354                         /* Access the lower 1K of DL PCIE block registers. */
8355                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8356                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8357
8358                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8359                                    TG3_PCIE_DL_LO_FTSMAX);
8360                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8361                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8362                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8363
8364                         tw32(GRC_MODE, grc_mode);
8365                 }
8366
8367                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8368                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8369                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8370                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8371         }
8372
8373         /* This works around an issue with Athlon chipsets on
8374          * B3 tigon3 silicon.  This bit has no effect on any
8375          * other revision.  But do not set this on PCI Express
8376          * chips and don't even touch the clocks if the CPMU is present.
8377          */
8378         if (!tg3_flag(tp, CPMU_PRESENT)) {
8379                 if (!tg3_flag(tp, PCI_EXPRESS))
8380                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8381                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8382         }
8383
8384         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8385             tg3_flag(tp, PCIX_MODE)) {
8386                 val = tr32(TG3PCI_PCISTATE);
8387                 val |= PCISTATE_RETRY_SAME_DMA;
8388                 tw32(TG3PCI_PCISTATE, val);
8389         }
8390
8391         if (tg3_flag(tp, ENABLE_APE)) {
8392                 /* Allow reads and writes to the
8393                  * APE register and memory space.
8394                  */
8395                 val = tr32(TG3PCI_PCISTATE);
8396                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8397                        PCISTATE_ALLOW_APE_SHMEM_WR |
8398                        PCISTATE_ALLOW_APE_PSPACE_WR;
8399                 tw32(TG3PCI_PCISTATE, val);
8400         }
8401
8402         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8403                 /* Enable some hw fixes.  */
8404                 val = tr32(TG3PCI_MSI_DATA);
8405                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8406                 tw32(TG3PCI_MSI_DATA, val);
8407         }
8408
8409         /* Descriptor ring init may make accesses to the
8410          * NIC SRAM area to set up the TX descriptors, so we
8411          * can only do this after the hardware has been
8412          * successfully reset.
8413          */
8414         err = tg3_init_rings(tp);
8415         if (err)
8416                 return err;
8417
8418         if (tg3_flag(tp, 57765_PLUS)) {
8419                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8420                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8421                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8422                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8423                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8424                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8425                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8426                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8427         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8428                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8429                 /* This value is determined during the probe-time DMA
8430                  * engine test, tg3_test_dma.
8431                  */
8432                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8433         }
8434
8435         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8436                           GRC_MODE_4X_NIC_SEND_RINGS |
8437                           GRC_MODE_NO_TX_PHDR_CSUM |
8438                           GRC_MODE_NO_RX_PHDR_CSUM);
8439         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8440
8441         /* Pseudo-header checksum is done by hardware logic and not
8442          * the offload processors, so make the chip do the pseudo-
8443          * header checksums on receive.  For transmit it is more
8444          * convenient to do the pseudo-header checksum in software
8445          * as Linux does that on transmit for us in all cases.
8446          */
8447         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8448
8449         tw32(GRC_MODE,
8450              tp->grc_mode |
8451              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8452
8453         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
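        /* Writing 65 presumably selects a divide-by-66 (value + 1)
         * prescaler, turning the 66 MHz clock into a 1 MHz, i.e.
         * 1 usec resolution, timer tick.
         */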
8454         val = tr32(GRC_MISC_CFG);
8455         val &= ~0xff;
8456         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8457         tw32(GRC_MISC_CFG, val);
8458
8459         /* Initialize MBUF/DESC pool. */
8460         if (tg3_flag(tp, 5750_PLUS)) {
8461                 /* Do nothing.  */
8462         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8463                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8464                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8465                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8466                 else
8467                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8468                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8469                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8470         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8471                 int fw_len;
8472
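                /* Round the firmware image length up to the next
                 * 128-byte boundary so the mbuf pool can be placed
                 * directly above it in NIC SRAM.
                 */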
8473                 fw_len = tp->fw_len;
8474                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8475                 tw32(BUFMGR_MB_POOL_ADDR,
8476                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8477                 tw32(BUFMGR_MB_POOL_SIZE,
8478                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8479         }
8480
8481         if (tp->dev->mtu <= ETH_DATA_LEN) {
8482                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8483                      tp->bufmgr_config.mbuf_read_dma_low_water);
8484                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8485                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8486                 tw32(BUFMGR_MB_HIGH_WATER,
8487                      tp->bufmgr_config.mbuf_high_water);
8488         } else {
8489                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8490                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8491                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8492                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8493                 tw32(BUFMGR_MB_HIGH_WATER,
8494                      tp->bufmgr_config.mbuf_high_water_jumbo);
8495         }
8496         tw32(BUFMGR_DMA_LOW_WATER,
8497              tp->bufmgr_config.dma_low_water);
8498         tw32(BUFMGR_DMA_HIGH_WATER,
8499              tp->bufmgr_config.dma_high_water);
8500
8501         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8502         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8503                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8504         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8505             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8506             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8507                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8508         tw32(BUFMGR_MODE, val);
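        /* Poll up to 20 ms (2000 iterations x 10 us) for the buffer
         * manager to report itself enabled.
         */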
8509         for (i = 0; i < 2000; i++) {
8510                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8511                         break;
8512                 udelay(10);
8513         }
8514         if (i >= 2000) {
8515                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8516                 return -ENODEV;
8517         }
8518
8519         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8520                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8521
8522         tg3_setup_rxbd_thresholds(tp);
8523
8524         /* Initialize TG3_BDINFO's at:
8525          *  RCVDBDI_STD_BD:     standard eth size rx ring
8526          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8527          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8528          *
8529          * like so:
8530          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8531          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8532          *                              ring attribute flags
8533          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8534          *
8535          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8536          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8537          *
8538          * The size of each ring is fixed in the firmware, but the location is
8539          * configurable.
8540          */
8541         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8542              ((u64) tpr->rx_std_mapping >> 32));
8543         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8544              ((u64) tpr->rx_std_mapping & 0xffffffff));
8545         if (!tg3_flag(tp, 5717_PLUS))
8546                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8547                      NIC_SRAM_RX_BUFFER_DESC);
8548
8549         /* Disable the mini ring */
8550         if (!tg3_flag(tp, 5705_PLUS))
8551                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8552                      BDINFO_FLAGS_DISABLED);
8553
8554         /* Program the jumbo buffer descriptor ring control
8555          * blocks on those devices that have them.
8556          */
8557         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8558             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8559
8560                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8561                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8562                              ((u64) tpr->rx_jmb_mapping >> 32));
8563                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8564                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8565                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8566                               BDINFO_FLAGS_MAXLEN_SHIFT;
8567                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8568                              val | BDINFO_FLAGS_USE_EXT_RECV);
8569                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8570                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8571                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8572                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8573                 } else {
8574                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8575                              BDINFO_FLAGS_DISABLED);
8576                 }
8577
8578                 if (tg3_flag(tp, 57765_PLUS)) {
8579                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8580                                 val = TG3_RX_STD_MAX_SIZE_5700;
8581                         else
8582                                 val = TG3_RX_STD_MAX_SIZE_5717;
8583                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8584                         val |= (TG3_RX_STD_DMA_SZ << 2);
8585                 } else
8586                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8587         } else
8588                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8589
8590         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8591
8592         tpr->rx_std_prod_idx = tp->rx_pending;
8593         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8594
8595         tpr->rx_jmb_prod_idx =
8596                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8597         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8598
8599         tg3_rings_reset(tp);
8600
8601         /* Initialize MAC address and backoff seed. */
8602         __tg3_set_mac_addr(tp, 0);
8603
8604         /* MTU + ethernet header + FCS + optional VLAN tag */
8605         tw32(MAC_RX_MTU_SIZE,
8606              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8607
8608         /* The slot time is changed by tg3_setup_phy if we
8609          * run at gigabit with half duplex.
8610          */
8611         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8612               (6 << TX_LENGTHS_IPG_SHIFT) |
8613               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8614
8615         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8616                 val |= tr32(MAC_TX_LENGTHS) &
8617                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8618                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8619
8620         tw32(MAC_TX_LENGTHS, val);
8621
8622         /* Receive rules. */
8623         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8624         tw32(RCVLPC_CONFIG, 0x0181);
8625
8626         /* Calculate RDMAC_MODE setting early, we need it to determine
8627          * the RCVLPC_STATE_ENABLE mask.
8628          */
8629         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8630                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8631                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8632                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8633                       RDMAC_MODE_LNGREAD_ENAB);
8634
8635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8636                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8637
8638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8639             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8640             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8641                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8642                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8643                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8644
8645         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8646             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8647                 if (tg3_flag(tp, TSO_CAPABLE) &&
8648                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8649                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8650                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8651                            !tg3_flag(tp, IS_5788)) {
8652                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8653                 }
8654         }
8655
8656         if (tg3_flag(tp, PCI_EXPRESS))
8657                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8658
8659         if (tg3_flag(tp, HW_TSO_1) ||
8660             tg3_flag(tp, HW_TSO_2) ||
8661             tg3_flag(tp, HW_TSO_3))
8662                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8663
8664         if (tg3_flag(tp, 57765_PLUS) ||
8665             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8666             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8667                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8668
8669         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8670                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8671
8672         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8673             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8674             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8675             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8676             tg3_flag(tp, 57765_PLUS)) {
8677                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8678                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8679                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8680                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8681                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8682                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8683                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8684                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8685                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8686                 }
8687                 tw32(TG3_RDMA_RSRVCTRL_REG,
8688                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8689         }
8690
8691         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8692             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8693                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8694                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8695                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8696                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8697         }
8698
8699         /* Receive/send statistics. */
8700         if (tg3_flag(tp, 5750_PLUS)) {
8701                 val = tr32(RCVLPC_STATS_ENABLE);
8702                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8703                 tw32(RCVLPC_STATS_ENABLE, val);
8704         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8705                    tg3_flag(tp, TSO_CAPABLE)) {
8706                 val = tr32(RCVLPC_STATS_ENABLE);
8707                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8708                 tw32(RCVLPC_STATS_ENABLE, val);
8709         } else {
8710                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8711         }
8712         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8713         tw32(SNDDATAI_STATSENAB, 0xffffff);
8714         tw32(SNDDATAI_STATSCTRL,
8715              (SNDDATAI_SCTRL_ENABLE |
8716               SNDDATAI_SCTRL_FASTUPD));
8717
8718         /* Set up the host coalescing engine. */
8719         tw32(HOSTCC_MODE, 0);
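        /* Wait up to 20 ms (2000 x 10 us) for the engine to go
         * quiescent before programming new coalescing parameters.
         */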
8720         for (i = 0; i < 2000; i++) {
8721                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8722                         break;
8723                 udelay(10);
8724         }
8725
8726         __tg3_set_coalesce(tp, &tp->coal);
8727
8728         if (!tg3_flag(tp, 5705_PLUS)) {
8729                 /* Status/statistics block address.  See tg3_timer,
8730                  * the tg3_periodic_fetch_stats call there, and
8731                  * tg3_get_stats to see how this works for 5705/5750 chips.
8732                  */
8733                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8734                      ((u64) tp->stats_mapping >> 32));
8735                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8736                      ((u64) tp->stats_mapping & 0xffffffff));
8737                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8738
8739                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8740
8741                 /* Clear statistics and status block memory areas */
8742                 for (i = NIC_SRAM_STATS_BLK;
8743                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8744                      i += sizeof(u32)) {
8745                         tg3_write_mem(tp, i, 0);
8746                         udelay(40);
8747                 }
8748         }
8749
8750         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8751
8752         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8753         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8754         if (!tg3_flag(tp, 5705_PLUS))
8755                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8756
8757         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8758                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8759                 /* reset to prevent losing 1st rx packet intermittently */
8760                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8761                 udelay(10);
8762         }
8763
8764         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8765                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8766                         MAC_MODE_FHDE_ENABLE;
8767         if (tg3_flag(tp, ENABLE_APE))
8768                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8769         if (!tg3_flag(tp, 5705_PLUS) &&
8770             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8771             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8772                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8773         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8774         udelay(40);
8775
8776         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8777          * If TG3_FLAG_IS_NIC is zero, we should read the
8778          * register to preserve the GPIO settings for LOMs. The GPIOs,
8779          * whether used as inputs or outputs, are set by boot code after
8780          * reset.
8781          */
8782         if (!tg3_flag(tp, IS_NIC)) {
8783                 u32 gpio_mask;
8784
8785                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8786                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8787                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8788
8789                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8790                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8791                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8792
8793                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8794                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8795
8796                 tp->grc_local_ctrl &= ~gpio_mask;
8797                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8798
8799                 /* GPIO1 must be driven high for eeprom write protect */
8800                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8801                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8802                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8803         }
8804         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8805         udelay(100);
8806
8807         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8808                 val = tr32(MSGINT_MODE);
8809                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8810                 if (!tg3_flag(tp, 1SHOT_MSI))
8811                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8812                 tw32(MSGINT_MODE, val);
8813         }
8814
8815         if (!tg3_flag(tp, 5705_PLUS)) {
8816                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8817                 udelay(40);
8818         }
8819
8820         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8821                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8822                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8823                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8824                WDMAC_MODE_LNGREAD_ENAB);
8825
8826         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8827             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8828                 if (tg3_flag(tp, TSO_CAPABLE) &&
8829                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8830                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8831                         /* nothing */
8832                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8833                            !tg3_flag(tp, IS_5788)) {
8834                         val |= WDMAC_MODE_RX_ACCEL;
8835                 }
8836         }
8837
8838         /* Enable host coalescing bug fix */
8839         if (tg3_flag(tp, 5755_PLUS))
8840                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8841
8842         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8843                 val |= WDMAC_MODE_BURST_ALL_DATA;
8844
8845         tw32_f(WDMAC_MODE, val);
8846         udelay(40);
8847
8848         if (tg3_flag(tp, PCIX_MODE)) {
8849                 u16 pcix_cmd;
8850
8851                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8852                                      &pcix_cmd);
8853                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8854                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8855                         pcix_cmd |= PCI_X_CMD_READ_2K;
8856                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8857                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8858                         pcix_cmd |= PCI_X_CMD_READ_2K;
8859                 }
8860                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8861                                       pcix_cmd);
8862         }
8863
8864         tw32_f(RDMAC_MODE, rdmac_mode);
8865         udelay(40);
8866
8867         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8868         if (!tg3_flag(tp, 5705_PLUS))
8869                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8870
8871         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8872                 tw32(SNDDATAC_MODE,
8873                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8874         else
8875                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8876
8877         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8878         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8879         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8880         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8881                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8882         tw32(RCVDBDI_MODE, val);
8883         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8884         if (tg3_flag(tp, HW_TSO_1) ||
8885             tg3_flag(tp, HW_TSO_2) ||
8886             tg3_flag(tp, HW_TSO_3))
8887                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8888         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8889         if (tg3_flag(tp, ENABLE_TSS))
8890                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8891         tw32(SNDBDI_MODE, val);
8892         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8893
8894         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8895                 err = tg3_load_5701_a0_firmware_fix(tp);
8896                 if (err)
8897                         return err;
8898         }
8899
8900         if (tg3_flag(tp, TSO_CAPABLE)) {
8901                 err = tg3_load_tso_firmware(tp);
8902                 if (err)
8903                         return err;
8904         }
8905
8906         tp->tx_mode = TX_MODE_ENABLE;
8907
8908         if (tg3_flag(tp, 5755_PLUS) ||
8909             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8910                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8911
8912         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8913                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8914                 tp->tx_mode &= ~val;
8915                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8916         }
8917
8918         tw32_f(MAC_TX_MODE, tp->tx_mode);
8919         udelay(100);
8920
8921         if (tg3_flag(tp, ENABLE_RSS)) {
8922                 int i = 0;
8923                 u32 reg = MAC_RSS_INDIR_TBL_0;
8924
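                /* The indirection table holds 4-bit queue indices
                 * packed eight per 32-bit register.  With a single rx
                 * ring (irq_cnt == 2: one vector for link events plus
                 * one for rx) every entry maps to queue 0; otherwise
                 * the entries are spread round-robin across the
                 * irq_cnt - 1 rx queues.
                 */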
8925                 if (tp->irq_cnt == 2) {
8926                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8927                                 tw32(reg, 0x0);
8928                                 reg += 4;
8929                         }
8930                 } else {
8931                         u32 val;
8932
8933                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8934                                 val = i % (tp->irq_cnt - 1);
8935                                 i++;
8936                                 for (; i % 8; i++) {
8937                                         val <<= 4;
8938                                         val |= (i % (tp->irq_cnt - 1));
8939                                 }
8940                                 tw32(reg, val);
8941                                 reg += 4;
8942                         }
8943                 }
8944
8945                 /* Set up the "secret" hash key. */
8946                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8947                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8948                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8949                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8950                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8951                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8952                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8953                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8954                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8955                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8956         }
8957
8958         tp->rx_mode = RX_MODE_ENABLE;
8959         if (tg3_flag(tp, 5755_PLUS))
8960                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8961
8962         if (tg3_flag(tp, ENABLE_RSS))
8963                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8964                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8965                                RX_MODE_RSS_IPV6_HASH_EN |
8966                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8967                                RX_MODE_RSS_IPV4_HASH_EN |
8968                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8969
8970         tw32_f(MAC_RX_MODE, tp->rx_mode);
8971         udelay(10);
8972
8973         tw32(MAC_LED_CTRL, tp->led_ctrl);
8974
8975         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8976         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8977                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8978                 udelay(10);
8979         }
8980         tw32_f(MAC_RX_MODE, tp->rx_mode);
8981         udelay(10);
8982
8983         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8984                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8985                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8986                         /* Set drive transmission level to 1.2V  */
8987                         /* only if the signal pre-emphasis bit is not set  */
8988                         val = tr32(MAC_SERDES_CFG);
8989                         val &= 0xfffff000;
8990                         val |= 0x880;
8991                         tw32(MAC_SERDES_CFG, val);
8992                 }
8993                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8994                         tw32(MAC_SERDES_CFG, 0x616000);
8995         }
8996
8997         /* Prevent chip from dropping frames when flow control
8998          * is enabled.
8999          */
9000         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9001                 val = 1;
9002         else
9003                 val = 2;
9004         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9005
9006         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9007             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9008                 /* Use hardware link auto-negotiation */
9009                 tg3_flag_set(tp, HW_AUTONEG);
9010         }
9011
9012         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9013             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9014                 u32 tmp;
9015
9016                 tmp = tr32(SERDES_RX_CTRL);
9017                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9018                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9019                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9020                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9021         }
9022
9023         if (!tg3_flag(tp, USE_PHYLIB)) {
9024                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9025                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9026                         tp->link_config.speed = tp->link_config.orig_speed;
9027                         tp->link_config.duplex = tp->link_config.orig_duplex;
9028                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9029                 }
9030
9031                 err = tg3_setup_phy(tp, 0);
9032                 if (err)
9033                         return err;
9034
9035                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9036                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9037                         u32 tmp;
9038
9039                         /* Clear CRC stats. */
9040                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9041                                 tg3_writephy(tp, MII_TG3_TEST1,
9042                                              tmp | MII_TG3_TEST1_CRC_EN);
9043                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9044                         }
9045                 }
9046         }
9047
9048         __tg3_set_rx_mode(tp->dev);
9049
9050         /* Initialize receive rules. */
9051         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9052         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9053         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9054         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9055
9056         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9057                 limit = 8;
9058         else
9059                 limit = 16;
9060         if (tg3_flag(tp, ENABLE_ASF))
9061                 limit -= 4;
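        /* The cases below fall through deliberately, clearing every
         * unused rule/value pair from rule (limit - 1) down to rule 4.
         * Rules 0 and 1 were programmed above; the writes for rules
         * 2 and 3 are left commented out.
         */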
9062         switch (limit) {
9063         case 16:
9064                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9065         case 15:
9066                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9067         case 14:
9068                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9069         case 13:
9070                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9071         case 12:
9072                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9073         case 11:
9074                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9075         case 10:
9076                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9077         case 9:
9078                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9079         case 8:
9080                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9081         case 7:
9082                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9083         case 6:
9084                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9085         case 5:
9086                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9087         case 4:
9088                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9089         case 3:
9090                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9091         case 2:
9092         case 1:
9093
9094         default:
9095                 break;
9096         }
9097
9098         if (tg3_flag(tp, ENABLE_APE))
9099                 /* Write our heartbeat update interval to APE. */
9100                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9101                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9102
9103         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9104
9105         return 0;
9106 }
9107
9108 /* Called at device open time to get the chip ready for
9109  * packet processing.  Invoked with tp->lock held.
9110  */
9111 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9112 {
9113         tg3_switch_clocks(tp);
9114
9115         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9116
9117         return tg3_reset_hw(tp, reset_phy);
9118 }
9119
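/* Fold a 32-bit hardware counter into a 64-bit (high/low) software
 * counter.  Carry detection relies on unsigned wraparound: if the
 * updated low word is smaller than the value just added, the add
 * overflowed 32 bits and the high word is incremented.
 */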
9120 #define TG3_STAT_ADD32(PSTAT, REG) \
9121 do {    u32 __val = tr32(REG); \
9122         (PSTAT)->low += __val; \
9123         if ((PSTAT)->low < __val) \
9124                 (PSTAT)->high += 1; \
9125 } while (0)
9126
9127 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9128 {
9129         struct tg3_hw_stats *sp = tp->hw_stats;
9130
9131         if (!netif_carrier_ok(tp->dev))
9132                 return;
9133
9134         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9135         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9136         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9137         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9138         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9139         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9140         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9141         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9142         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9143         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9144         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9145         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9146         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9147
9148         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9149         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9150         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9151         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9152         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9153         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9154         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9155         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9156         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9157         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9158         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9159         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9160         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9161         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9162
9163         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9164         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9165             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9166             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9167                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9168         } else {
9169                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9170                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9171                 if (val) {
9172                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9173                         sp->rx_discards.low += val;
9174                         if (sp->rx_discards.low < val)
9175                                 sp->rx_discards.high += 1;
9176                 }
9177                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9178         }
9179         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9180 }
9181
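/* Work around occasionally dropped MSIs: if a vector has work
 * pending but its rx/tx consumer indices have not advanced since the
 * last timer tick, assume the interrupt was lost and invoke the MSI
 * handler directly.  chk_msi_cnt grants one tick of grace to avoid
 * false positives.
 */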
9182 static void tg3_chk_missed_msi(struct tg3 *tp)
9183 {
9184         u32 i;
9185
9186         for (i = 0; i < tp->irq_cnt; i++) {
9187                 struct tg3_napi *tnapi = &tp->napi[i];
9188
9189                 if (tg3_has_work(tnapi)) {
9190                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9191                             tnapi->last_tx_cons == tnapi->tx_cons) {
9192                                 if (tnapi->chk_msi_cnt < 1) {
9193                                         tnapi->chk_msi_cnt++;
9194                                         return;
9195                                 }
9196                                 tg3_msi(0, tnapi);
9197                         }
9198                 }
9199                 tnapi->chk_msi_cnt = 0;
9200                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9201                 tnapi->last_tx_cons = tnapi->tx_cons;
9202         }
9203 }
9204
9205 static void tg3_timer(unsigned long __opaque)
9206 {
9207         struct tg3 *tp = (struct tg3 *) __opaque;
9208
9209         if (tp->irq_sync)
9210                 goto restart_timer;
9211
9212         spin_lock(&tp->lock);
9213
9214         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9215             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9216                 tg3_chk_missed_msi(tp);
9217
9218         if (!tg3_flag(tp, TAGGED_STATUS)) {
9219                 /* All of this garbage is because, when using non-tagged
9220                  * IRQ status, the mailbox/status_block protocol the chip
9221                  * uses with the CPU is race prone.
9222                  */
9223                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9224                         tw32(GRC_LOCAL_CTRL,
9225                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9226                 } else {
9227                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9228                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9229                 }
9230
9231                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9232                         tg3_flag_set(tp, RESTART_TIMER);
9233                         spin_unlock(&tp->lock);
9234                         schedule_work(&tp->reset_task);
9235                         return;
9236                 }
9237         }
9238
9239         /* This part only runs once per second. */
9240         if (!--tp->timer_counter) {
9241                 if (tg3_flag(tp, 5705_PLUS))
9242                         tg3_periodic_fetch_stats(tp);
9243
9244                 if (tp->setlpicnt && !--tp->setlpicnt)
9245                         tg3_phy_eee_enable(tp);
9246
9247                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9248                         u32 mac_stat;
9249                         int phy_event;
9250
9251                         mac_stat = tr32(MAC_STATUS);
9252
9253                         phy_event = 0;
9254                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9255                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9256                                         phy_event = 1;
9257                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9258                                 phy_event = 1;
9259
9260                         if (phy_event)
9261                                 tg3_setup_phy(tp, 0);
9262                 } else if (tg3_flag(tp, POLL_SERDES)) {
9263                         u32 mac_stat = tr32(MAC_STATUS);
9264                         int need_setup = 0;
9265
9266                         if (netif_carrier_ok(tp->dev) &&
9267                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9268                                 need_setup = 1;
9269                         }
9270                         if (!netif_carrier_ok(tp->dev) &&
9271                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9272                                          MAC_STATUS_SIGNAL_DET))) {
9273                                 need_setup = 1;
9274                         }
9275                         if (need_setup) {
9276                                 if (!tp->serdes_counter) {
9277                                         tw32_f(MAC_MODE,
9278                                              (tp->mac_mode &
9279                                               ~MAC_MODE_PORT_MODE_MASK));
9280                                         udelay(40);
9281                                         tw32_f(MAC_MODE, tp->mac_mode);
9282                                         udelay(40);
9283                                 }
9284                                 tg3_setup_phy(tp, 0);
9285                         }
9286                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9287                            tg3_flag(tp, 5780_CLASS)) {
9288                         tg3_serdes_parallel_detect(tp);
9289                 }
9290
9291                 tp->timer_counter = tp->timer_multiplier;
9292         }
9293
9294         /* Heartbeat is only sent once every 2 seconds.
9295          *
9296          * The heartbeat is to tell the ASF firmware that the host
9297          * driver is still alive.  In the event that the OS crashes,
9298          * ASF needs to reset the hardware to free up the FIFO space
9299          * that may be filled with rx packets destined for the host.
9300          * If the FIFO is full, ASF will no longer function properly.
9301          *
9302          * Unintended resets have been reported on real-time kernels
9303          * where the timer doesn't run on time.  Netpoll will also have
9304          * the same problem.
9305          *
9306          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9307          * to check the ring condition when the heartbeat is expiring
9308          * before doing the reset.  This will prevent most unintended
9309          * resets.
9310          */
9311         if (!--tp->asf_counter) {
9312                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9313                         tg3_wait_for_event_ack(tp);
9314
9315                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9316                                       FWCMD_NICDRV_ALIVE3);
9317                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9318                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9319                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9320
9321                         tg3_generate_fw_event(tp);
9322                 }
9323                 tp->asf_counter = tp->asf_multiplier;
9324         }
9325
9326         spin_unlock(&tp->lock);
9327
9328 restart_timer:
9329         tp->timer.expires = jiffies + tp->timer_offset;
9330         add_timer(&tp->timer);
9331 }
9332
9333 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9334 {
9335         irq_handler_t fn;
9336         unsigned long flags;
9337         char *name;
9338         struct tg3_napi *tnapi = &tp->napi[irq_num];
9339
9340         if (tp->irq_cnt == 1)
9341                 name = tp->dev->name;
9342         else {
9343                 name = &tnapi->irq_lbl[0];
9344                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9345                 name[IFNAMSIZ-1] = 0;
9346         }
9347
9348         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9349                 fn = tg3_msi;
9350                 if (tg3_flag(tp, 1SHOT_MSI))
9351                         fn = tg3_msi_1shot;
9352                 flags = 0;
9353         } else {
9354                 fn = tg3_interrupt;
9355                 if (tg3_flag(tp, TAGGED_STATUS))
9356                         fn = tg3_interrupt_tagged;
9357                 flags = IRQF_SHARED;
9358         }
9359
9360         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9361 }
9362
9363 static int tg3_test_interrupt(struct tg3 *tp)
9364 {
9365         struct tg3_napi *tnapi = &tp->napi[0];
9366         struct net_device *dev = tp->dev;
9367         int err, i, intr_ok = 0;
9368         u32 val;
9369
9370         if (!netif_running(dev))
9371                 return -ENODEV;
9372
9373         tg3_disable_ints(tp);
9374
9375         free_irq(tnapi->irq_vec, tnapi);
9376
9377         /*
9378          * Turn off MSI one shot mode.  Otherwise this test has no
9379          * observable way to know whether the interrupt was delivered.
9380          */
9381         if (tg3_flag(tp, 57765_PLUS)) {
9382                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9383                 tw32(MSGINT_MODE, val);
9384         }
9385
9386         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9387                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9388         if (err)
9389                 return err;
9390
9391         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9392         tg3_enable_ints(tp);
9393
9394         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9395                tnapi->coal_now);
9396
9397         for (i = 0; i < 5; i++) {
9398                 u32 int_mbox, misc_host_ctrl;
9399
9400                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9401                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9402
9403                 if ((int_mbox != 0) ||
9404                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9405                         intr_ok = 1;
9406                         break;
9407                 }
9408
9409                 if (tg3_flag(tp, 57765_PLUS) &&
9410                     tnapi->hw_status->status_tag != tnapi->last_tag)
9411                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9412
9413                 msleep(10);
9414         }
9415
9416         tg3_disable_ints(tp);
9417
9418         free_irq(tnapi->irq_vec, tnapi);
9419
9420         err = tg3_request_irq(tp, 0);
9421
9422         if (err)
9423                 return err;
9424
9425         if (intr_ok) {
9426                 /* Reenable MSI one shot mode. */
9427                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9428                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9429                         tw32(MSGINT_MODE, val);
9430                 }
9431                 return 0;
9432         }
9433
9434         return -EIO;
9435 }
9436
9437 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
9438  * INTx mode is successfully restored.
9439  */
9440 static int tg3_test_msi(struct tg3 *tp)
9441 {
9442         int err;
9443         u16 pci_cmd;
9444
9445         if (!tg3_flag(tp, USING_MSI))
9446                 return 0;
9447
9448         /* Turn off SERR reporting in case MSI terminates with Master
9449          * Abort.
9450          */
9451         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9452         pci_write_config_word(tp->pdev, PCI_COMMAND,
9453                               pci_cmd & ~PCI_COMMAND_SERR);
9454
9455         err = tg3_test_interrupt(tp);
9456
9457         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9458
9459         if (!err)
9460                 return 0;
9461
9462         /* other failures */
9463         if (err != -EIO)
9464                 return err;
9465
9466         /* MSI test failed, go back to INTx mode */
9467         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9468                     "to INTx mode. Please report this failure to the PCI "
9469                     "maintainer and include system chipset information\n");
9470
9471         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9472
9473         pci_disable_msi(tp->pdev);
9474
9475         tg3_flag_clear(tp, USING_MSI);
9476         tp->napi[0].irq_vec = tp->pdev->irq;
9477
9478         err = tg3_request_irq(tp, 0);
9479         if (err)
9480                 return err;
9481
9482         /* Need to reset the chip because the MSI cycle may have terminated
9483          * with Master Abort.
9484          */
9485         tg3_full_lock(tp, 1);
9486
9487         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9488         err = tg3_init_hw(tp, 1);
9489
9490         tg3_full_unlock(tp);
9491
9492         if (err)
9493                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9494
9495         return err;
9496 }
9497
9498 static int tg3_request_firmware(struct tg3 *tp)
9499 {
9500         const __be32 *fw_data;
9501
9502         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9503                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9504                            tp->fw_needed);
9505                 return -ENOENT;
9506         }
9507
9508         fw_data = (void *)tp->fw->data;
9509
9510         /* Firmware blob starts with version numbers, followed by
9511          * start address and _full_ length including BSS sections
9512          * (which must be longer than the actual data, of course).
9513          */
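        /* That header is three be32 words (12 bytes), which is why
         * the length check below compares against tp->fw->size - 12.
         */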
9514
9515         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9516         if (tp->fw_len < (tp->fw->size - 12)) {
9517                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9518                            tp->fw_len, tp->fw_needed);
9519                 release_firmware(tp->fw);
9520                 tp->fw = NULL;
9521                 return -EINVAL;
9522         }
9523
9524         /* We no longer need to request the firmware; we have it. */
9525         tp->fw_needed = NULL;
9526         return 0;
9527 }
9528
9529 static bool tg3_enable_msix(struct tg3 *tp)
9530 {
9531         int i, rc, cpus = num_online_cpus();
9532         struct msix_entry msix_ent[tp->irq_max];
9533
9534         if (cpus == 1)
9535                 /* Just fallback to the simpler MSI mode. */
9536                 return false;
9537
9538         /*
9539          * We want as many rx rings enabled as there are cpus.
9540          * The first MSI-X vector only deals with link interrupts, etc.,
9541          * so we add one to the number of vectors we are requesting.
9542          */
9543         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
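        /* E.g. with four online cpus this asks for min(4 + 1, irq_max)
         * vectors: one for link plus up to four for rx.
         */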
9544
9545         for (i = 0; i < tp->irq_max; i++) {
9546                 msix_ent[i].entry  = i;
9547                 msix_ent[i].vector = 0;
9548         }
9549
9550         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9551         if (rc < 0) {
9552                 return false;
9553         } else if (rc != 0) {
9554                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9555                         return false;
9556                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9557                               tp->irq_cnt, rc);
9558                 tp->irq_cnt = rc;
9559         }
9560
9561         for (i = 0; i < tp->irq_max; i++)
9562                 tp->napi[i].irq_vec = msix_ent[i].vector;
9563
9564         netif_set_real_num_tx_queues(tp->dev, 1);
9565         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9566         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9567                 pci_disable_msix(tp->pdev);
9568                 return false;
9569         }
9570
9571         if (tp->irq_cnt > 1) {
9572                 tg3_flag_set(tp, ENABLE_RSS);
9573
9574                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9575                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9576                         tg3_flag_set(tp, ENABLE_TSS);
9577                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9578                 }
9579         }
9580
9581         return true;
9582 }
9583
9584 static void tg3_ints_init(struct tg3 *tp)
9585 {
9586         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9587             !tg3_flag(tp, TAGGED_STATUS)) {
9588                 /* All MSI-supporting chips should support tagged
9589                  * status.  Assert that this is the case.
9590                  */
9591                 netdev_warn(tp->dev,
9592                             "MSI without TAGGED_STATUS? Not using MSI\n");
9593                 goto defcfg;
9594         }
9595
9596         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9597                 tg3_flag_set(tp, USING_MSIX);
9598         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9599                 tg3_flag_set(tp, USING_MSI);
9600
9601         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9602                 u32 msi_mode = tr32(MSGINT_MODE);
9603                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9604                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9605                 if (!tg3_flag(tp, 1SHOT_MSI))
9606                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9607                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9608         }
9609 defcfg:
9610         if (!tg3_flag(tp, USING_MSIX)) {
9611                 tp->irq_cnt = 1;
9612                 tp->napi[0].irq_vec = tp->pdev->irq;
9613                 netif_set_real_num_tx_queues(tp->dev, 1);
9614                 netif_set_real_num_rx_queues(tp->dev, 1);
9615         }
9616 }
9617
9618 static void tg3_ints_fini(struct tg3 *tp)
9619 {
9620         if (tg3_flag(tp, USING_MSIX))
9621                 pci_disable_msix(tp->pdev);
9622         else if (tg3_flag(tp, USING_MSI))
9623                 pci_disable_msi(tp->pdev);
9624         tg3_flag_clear(tp, USING_MSI);
9625         tg3_flag_clear(tp, USING_MSIX);
9626         tg3_flag_clear(tp, ENABLE_RSS);
9627         tg3_flag_clear(tp, ENABLE_TSS);
9628 }
9629
9630 static int tg3_open(struct net_device *dev)
9631 {
9632         struct tg3 *tp = netdev_priv(dev);
9633         int i, err;
9634
9635         if (tp->fw_needed) {
9636                 err = tg3_request_firmware(tp);
9637                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9638                         if (err)
9639                                 return err;
9640                 } else if (err) {
9641                         netdev_warn(tp->dev, "TSO capability disabled\n");
9642                         tg3_flag_clear(tp, TSO_CAPABLE);
9643                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9644                         netdev_notice(tp->dev, "TSO capability restored\n");
9645                         tg3_flag_set(tp, TSO_CAPABLE);
9646                 }
9647         }
9648
9649         netif_carrier_off(tp->dev);
9650
9651         err = tg3_power_up(tp);
9652         if (err)
9653                 return err;
9654
9655         tg3_full_lock(tp, 0);
9656
9657         tg3_disable_ints(tp);
9658         tg3_flag_clear(tp, INIT_COMPLETE);
9659
9660         tg3_full_unlock(tp);
9661
9662         /*
9663          * Set up interrupts first so we know how
9664          * many NAPI resources to allocate.
9665          */
9666         tg3_ints_init(tp);
9667
9668         /* The placement of this call is tied
9669          * to the setup and use of Host TX descriptors.
9670          */
9671         err = tg3_alloc_consistent(tp);
9672         if (err)
9673                 goto err_out1;
9674
9675         tg3_napi_init(tp);
9676
9677         tg3_napi_enable(tp);
9678
9679         for (i = 0; i < tp->irq_cnt; i++) {
9680                 struct tg3_napi *tnapi = &tp->napi[i];
9681                 err = tg3_request_irq(tp, i);
9682                 if (err) {
9683                         for (i--; i >= 0; i--)
9684                                 free_irq(tnapi->irq_vec, tnapi);
9685                         break;
9686                 }
9687         }
9688
9689         if (err)
9690                 goto err_out2;
9691
9692         tg3_full_lock(tp, 0);
9693
9694         err = tg3_init_hw(tp, 1);
9695         if (err) {
9696                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9697                 tg3_free_rings(tp);
9698         } else {
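                /*
                 * With tagged status blocks the service timer runs once
                 * a second; otherwise (and on 5717/57765-class parts)
                 * it polls at 10 Hz.
                 */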
9699                 if (tg3_flag(tp, TAGGED_STATUS) &&
9700                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9701                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9702                         tp->timer_offset = HZ;
9703                 else
9704                         tp->timer_offset = HZ / 10;
9705
9706                 BUG_ON(tp->timer_offset > HZ);
9707                 tp->timer_counter = tp->timer_multiplier =
9708                         (HZ / tp->timer_offset);
9709                 tp->asf_counter = tp->asf_multiplier =
9710                         ((HZ / tp->timer_offset) * 2);
9711
9712                 init_timer(&tp->timer);
9713                 tp->timer.expires = jiffies + tp->timer_offset;
9714                 tp->timer.data = (unsigned long) tp;
9715                 tp->timer.function = tg3_timer;
9716         }
9717
9718         tg3_full_unlock(tp);
9719
9720         if (err)
9721                 goto err_out3;
9722
9723         if (tg3_flag(tp, USING_MSI)) {
9724                 err = tg3_test_msi(tp);
9725
9726                 if (err) {
9727                         tg3_full_lock(tp, 0);
9728                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9729                         tg3_free_rings(tp);
9730                         tg3_full_unlock(tp);
9731
9732                         goto err_out2;
9733                 }
9734
9735                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9736                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9737
9738                         tw32(PCIE_TRANSACTION_CFG,
9739                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9740                 }
9741         }
9742
9743         tg3_phy_start(tp);
9744
9745         tg3_full_lock(tp, 0);
9746
9747         add_timer(&tp->timer);
9748         tg3_flag_set(tp, INIT_COMPLETE);
9749         tg3_enable_ints(tp);
9750
9751         tg3_full_unlock(tp);
9752
9753         netif_tx_start_all_queues(dev);
9754
9755         /*
9756          * Restore the loopback feature if it was turned on while the
9757          * device was down; make sure it is set up properly again now.
9758          */
9759         if (dev->features & NETIF_F_LOOPBACK)
9760                 tg3_set_loopback(dev, dev->features);
9761
9762         return 0;
9763
9764 err_out3:
9765         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9766                 struct tg3_napi *tnapi = &tp->napi[i];
9767                 free_irq(tnapi->irq_vec, tnapi);
9768         }
9769
9770 err_out2:
9771         tg3_napi_disable(tp);
9772         tg3_napi_fini(tp);
9773         tg3_free_consistent(tp);
9774
9775 err_out1:
9776         tg3_ints_fini(tp);
9777         tg3_frob_aux_power(tp, false);
9778         pci_set_power_state(tp->pdev, PCI_D3hot);
9779         return err;
9780 }
9781
9782 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9783                                                  struct rtnl_link_stats64 *);
9784 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9785
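/*
 * Tear down in roughly the reverse order of tg3_open(): stop NAPI, the
 * reset task and the service timer, halt the chip under the full lock,
 * release the IRQs, then snapshot the statistics so the totals survive
 * the hardware reset.
 */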
9786 static int tg3_close(struct net_device *dev)
9787 {
9788         int i;
9789         struct tg3 *tp = netdev_priv(dev);
9790
9791         tg3_napi_disable(tp);
9792         cancel_work_sync(&tp->reset_task);
9793
9794         netif_tx_stop_all_queues(dev);
9795
9796         del_timer_sync(&tp->timer);
9797
9798         tg3_phy_stop(tp);
9799
9800         tg3_full_lock(tp, 1);
9801
9802         tg3_disable_ints(tp);
9803
9804         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9805         tg3_free_rings(tp);
9806         tg3_flag_clear(tp, INIT_COMPLETE);
9807
9808         tg3_full_unlock(tp);
9809
9810         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9811                 struct tg3_napi *tnapi = &tp->napi[i];
9812                 free_irq(tnapi->irq_vec, tnapi);
9813         }
9814
9815         tg3_ints_fini(tp);
9816
9817         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9818
9819         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9820                sizeof(tp->estats_prev));
9821
9822         tg3_napi_fini(tp);
9823
9824         tg3_free_consistent(tp);
9825
9826         tg3_power_down(tp);
9827
9828         netif_carrier_off(tp->dev);
9829
9830         return 0;
9831 }
9832
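/* Hardware counters are split 32-bit high/low words; fold them into a u64. */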
9833 static inline u64 get_stat64(tg3_stat64_t *val)
9834 {
9835         return ((u64)val->high << 32) | ((u64)val->low);
9836 }
9837
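/*
 * On 5700/5701 with a copper PHY, CRC errors are counted by enabling
 * the PHY's test-register counter and accumulating it in software
 * (presumably because the MAC's FCS statistic is not dependable on
 * those chips); everything else uses the hardware rx_fcs_errors count.
 */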
9838 static u64 calc_crc_errors(struct tg3 *tp)
9839 {
9840         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9841
9842         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9843             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9844              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9845                 u32 val;
9846
9847                 spin_lock_bh(&tp->lock);
9848                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9849                         tg3_writephy(tp, MII_TG3_TEST1,
9850                                      val | MII_TG3_TEST1_CRC_EN);
9851                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9852                 } else
9853                         val = 0;
9854                 spin_unlock_bh(&tp->lock);
9855
9856                 tp->phy_crc_errors += val;
9857
9858                 return tp->phy_crc_errors;
9859         }
9860
9861         return get_stat64(&hw_stats->rx_fcs_errors);
9862 }
9863
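/*
 * The hardware statistics block is cleared by a chip reset, so add the
 * live counters to the totals saved before the last reset; ethtool then
 * sees monotonically increasing values.
 */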
9864 #define ESTAT_ADD(member) \
9865         estats->member =        old_estats->member + \
9866                                 get_stat64(&hw_stats->member)
9867
9868 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9869 {
9870         struct tg3_ethtool_stats *estats = &tp->estats;
9871         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9872         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9873
9874         if (!hw_stats)
9875                 return old_estats;
9876
9877         ESTAT_ADD(rx_octets);
9878         ESTAT_ADD(rx_fragments);
9879         ESTAT_ADD(rx_ucast_packets);
9880         ESTAT_ADD(rx_mcast_packets);
9881         ESTAT_ADD(rx_bcast_packets);
9882         ESTAT_ADD(rx_fcs_errors);
9883         ESTAT_ADD(rx_align_errors);
9884         ESTAT_ADD(rx_xon_pause_rcvd);
9885         ESTAT_ADD(rx_xoff_pause_rcvd);
9886         ESTAT_ADD(rx_mac_ctrl_rcvd);
9887         ESTAT_ADD(rx_xoff_entered);
9888         ESTAT_ADD(rx_frame_too_long_errors);
9889         ESTAT_ADD(rx_jabbers);
9890         ESTAT_ADD(rx_undersize_packets);
9891         ESTAT_ADD(rx_in_length_errors);
9892         ESTAT_ADD(rx_out_length_errors);
9893         ESTAT_ADD(rx_64_or_less_octet_packets);
9894         ESTAT_ADD(rx_65_to_127_octet_packets);
9895         ESTAT_ADD(rx_128_to_255_octet_packets);
9896         ESTAT_ADD(rx_256_to_511_octet_packets);
9897         ESTAT_ADD(rx_512_to_1023_octet_packets);
9898         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9899         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9900         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9901         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9902         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9903
9904         ESTAT_ADD(tx_octets);
9905         ESTAT_ADD(tx_collisions);
9906         ESTAT_ADD(tx_xon_sent);
9907         ESTAT_ADD(tx_xoff_sent);
9908         ESTAT_ADD(tx_flow_control);
9909         ESTAT_ADD(tx_mac_errors);
9910         ESTAT_ADD(tx_single_collisions);
9911         ESTAT_ADD(tx_mult_collisions);
9912         ESTAT_ADD(tx_deferred);
9913         ESTAT_ADD(tx_excessive_collisions);
9914         ESTAT_ADD(tx_late_collisions);
9915         ESTAT_ADD(tx_collide_2times);
9916         ESTAT_ADD(tx_collide_3times);
9917         ESTAT_ADD(tx_collide_4times);
9918         ESTAT_ADD(tx_collide_5times);
9919         ESTAT_ADD(tx_collide_6times);
9920         ESTAT_ADD(tx_collide_7times);
9921         ESTAT_ADD(tx_collide_8times);
9922         ESTAT_ADD(tx_collide_9times);
9923         ESTAT_ADD(tx_collide_10times);
9924         ESTAT_ADD(tx_collide_11times);
9925         ESTAT_ADD(tx_collide_12times);
9926         ESTAT_ADD(tx_collide_13times);
9927         ESTAT_ADD(tx_collide_14times);
9928         ESTAT_ADD(tx_collide_15times);
9929         ESTAT_ADD(tx_ucast_packets);
9930         ESTAT_ADD(tx_mcast_packets);
9931         ESTAT_ADD(tx_bcast_packets);
9932         ESTAT_ADD(tx_carrier_sense_errors);
9933         ESTAT_ADD(tx_discards);
9934         ESTAT_ADD(tx_errors);
9935
9936         ESTAT_ADD(dma_writeq_full);
9937         ESTAT_ADD(dma_write_prioq_full);
9938         ESTAT_ADD(rxbds_empty);
9939         ESTAT_ADD(rx_discards);
9940         ESTAT_ADD(rx_errors);
9941         ESTAT_ADD(rx_threshold_hit);
9942
9943         ESTAT_ADD(dma_readq_full);
9944         ESTAT_ADD(dma_read_prioq_full);
9945         ESTAT_ADD(tx_comp_queue_full);
9946
9947         ESTAT_ADD(ring_set_send_prod_index);
9948         ESTAT_ADD(ring_status_update);
9949         ESTAT_ADD(nic_irqs);
9950         ESTAT_ADD(nic_avoided_irqs);
9951         ESTAT_ADD(nic_tx_threshold_hit);
9952
9953         ESTAT_ADD(mbuf_lwm_thresh_hit);
9954
9955         return estats;
9956 }
9957
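/*
 * Map the hardware statistics onto the generic rtnl_link_stats64
 * layout, again folding in the pre-reset totals from net_stats_prev.
 */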
9958 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9959                                                  struct rtnl_link_stats64 *stats)
9960 {
9961         struct tg3 *tp = netdev_priv(dev);
9962         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9963         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9964
9965         if (!hw_stats)
9966                 return old_stats;
9967
9968         stats->rx_packets = old_stats->rx_packets +
9969                 get_stat64(&hw_stats->rx_ucast_packets) +
9970                 get_stat64(&hw_stats->rx_mcast_packets) +
9971                 get_stat64(&hw_stats->rx_bcast_packets);
9972
9973         stats->tx_packets = old_stats->tx_packets +
9974                 get_stat64(&hw_stats->tx_ucast_packets) +
9975                 get_stat64(&hw_stats->tx_mcast_packets) +
9976                 get_stat64(&hw_stats->tx_bcast_packets);
9977
9978         stats->rx_bytes = old_stats->rx_bytes +
9979                 get_stat64(&hw_stats->rx_octets);
9980         stats->tx_bytes = old_stats->tx_bytes +
9981                 get_stat64(&hw_stats->tx_octets);
9982
9983         stats->rx_errors = old_stats->rx_errors +
9984                 get_stat64(&hw_stats->rx_errors);
9985         stats->tx_errors = old_stats->tx_errors +
9986                 get_stat64(&hw_stats->tx_errors) +
9987                 get_stat64(&hw_stats->tx_mac_errors) +
9988                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9989                 get_stat64(&hw_stats->tx_discards);
9990
9991         stats->multicast = old_stats->multicast +
9992                 get_stat64(&hw_stats->rx_mcast_packets);
9993         stats->collisions = old_stats->collisions +
9994                 get_stat64(&hw_stats->tx_collisions);
9995
9996         stats->rx_length_errors = old_stats->rx_length_errors +
9997                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9998                 get_stat64(&hw_stats->rx_undersize_packets);
9999
10000         stats->rx_over_errors = old_stats->rx_over_errors +
10001                 get_stat64(&hw_stats->rxbds_empty);
10002         stats->rx_frame_errors = old_stats->rx_frame_errors +
10003                 get_stat64(&hw_stats->rx_align_errors);
10004         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10005                 get_stat64(&hw_stats->tx_discards);
10006         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10007                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10008
10009         stats->rx_crc_errors = old_stats->rx_crc_errors +
10010                 calc_crc_errors(tp);
10011
10012         stats->rx_missed_errors = old_stats->rx_missed_errors +
10013                 get_stat64(&hw_stats->rx_discards);
10014
10015         stats->rx_dropped = tp->rx_dropped;
10016
10017         return stats;
10018 }
10019
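/*
 * Bit-reflected CRC-32 over the buffer: standard Ethernet polynomial
 * (0xedb88320, LSB first), seeded with all-ones and inverted on return.
 * This should be equivalent to ~ether_crc_le(len, buf) from
 * <linux/crc32.h>; a private copy is kept here (assumption: to avoid
 * pulling in the CRC32 library for one hash).
 */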
10020 static inline u32 calc_crc(unsigned char *buf, int len)
10021 {
10022         u32 reg;
10023         u32 tmp;
10024         int j, k;
10025
10026         reg = 0xffffffff;
10027
10028         for (j = 0; j < len; j++) {
10029                 reg ^= buf[j];
10030
10031                 for (k = 0; k < 8; k++) {
10032                         tmp = reg & 0x01;
10033
10034                         reg >>= 1;
10035
10036                         if (tmp)
10037                                 reg ^= 0xedb88320;
10038                 }
10039         }
10040
10041         return ~reg;
10042 }
10043
10044 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10045 {
10046         /* accept or reject all multicast frames */
10047         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10048         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10049         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10050         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10051 }
10052
10053 static void __tg3_set_rx_mode(struct net_device *dev)
10054 {
10055         struct tg3 *tp = netdev_priv(dev);
10056         u32 rx_mode;
10057
10058         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10059                                   RX_MODE_KEEP_VLAN_TAG);
10060
10061 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10062         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10063          * flag clear.
10064          */
10065         if (!tg3_flag(tp, ENABLE_ASF))
10066                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10067 #endif
10068
10069         if (dev->flags & IFF_PROMISC) {
10070                 /* Promiscuous mode. */
10071                 rx_mode |= RX_MODE_PROMISC;
10072         } else if (dev->flags & IFF_ALLMULTI) {
10073                 /* Accept all multicast. */
10074                 tg3_set_multi(tp, 1);
10075         } else if (netdev_mc_empty(dev)) {
10076                 /* Reject all multicast. */
10077                 tg3_set_multi(tp, 0);
10078         } else {
10079                 /* Accept one or more multicast(s). */
10080                 struct netdev_hw_addr *ha;
10081                 u32 mc_filter[4] = { 0, };
10082                 u32 regidx;
10083                 u32 bit;
10084                 u32 crc;
10085
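                /*
                 * Hash each address into one of 128 filter bits: the
                 * low seven bits of the inverted CRC select the bit,
                 * with bits 6:5 picking one of the four 32-bit hash
                 * registers and bits 4:0 the position within it.
                 */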
10086                 netdev_for_each_mc_addr(ha, dev) {
10087                         crc = calc_crc(ha->addr, ETH_ALEN);
10088                         bit = ~crc & 0x7f;
10089                         regidx = (bit & 0x60) >> 5;
10090                         bit &= 0x1f;
10091                         mc_filter[regidx] |= (1 << bit);
10092                 }
10093
10094                 tw32(MAC_HASH_REG_0, mc_filter[0]);
10095                 tw32(MAC_HASH_REG_1, mc_filter[1]);
10096                 tw32(MAC_HASH_REG_2, mc_filter[2]);
10097                 tw32(MAC_HASH_REG_3, mc_filter[3]);
10098         }
10099
10100         if (rx_mode != tp->rx_mode) {
10101                 tp->rx_mode = rx_mode;
10102                 tw32_f(MAC_RX_MODE, rx_mode);
10103                 udelay(10);
10104         }
10105 }
10106
10107 static void tg3_set_rx_mode(struct net_device *dev)
10108 {
10109         struct tg3 *tp = netdev_priv(dev);
10110
10111         if (!netif_running(dev))
10112                 return;
10113
10114         tg3_full_lock(tp, 0);
10115         __tg3_set_rx_mode(dev);
10116         tg3_full_unlock(tp);
10117 }
10118
10119 static int tg3_get_regs_len(struct net_device *dev)
10120 {
10121         return TG3_REG_BLK_SIZE;
10122 }
10123
10124 static void tg3_get_regs(struct net_device *dev,
10125                 struct ethtool_regs *regs, void *_p)
10126 {
10127         struct tg3 *tp = netdev_priv(dev);
10128
10129         regs->version = 0;
10130
10131         memset(_p, 0, TG3_REG_BLK_SIZE);
10132
10133         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10134                 return;
10135
10136         tg3_full_lock(tp, 0);
10137
10138         tg3_dump_legacy_regs(tp, (u32 *)_p);
10139
10140         tg3_full_unlock(tp);
10141 }
10142
10143 static int tg3_get_eeprom_len(struct net_device *dev)
10144 {
10145         struct tg3 *tp = netdev_priv(dev);
10146
10147         return tp->nvram_size;
10148 }
10149
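/*
 * NVRAM is addressed in 4-byte words, so an arbitrary (offset, len)
 * request is serviced in three pieces: a leading partial word, the
 * aligned middle, and a trailing partial word.  eeprom->len records how
 * many bytes were actually copied if a read fails partway through.
 */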
10150 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10151 {
10152         struct tg3 *tp = netdev_priv(dev);
10153         int ret;
10154         u8  *pd;
10155         u32 i, offset, len, b_offset, b_count;
10156         __be32 val;
10157
10158         if (tg3_flag(tp, NO_NVRAM))
10159                 return -EINVAL;
10160
10161         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10162                 return -EAGAIN;
10163
10164         offset = eeprom->offset;
10165         len = eeprom->len;
10166         eeprom->len = 0;
10167
10168         eeprom->magic = TG3_EEPROM_MAGIC;
10169
10170         if (offset & 3) {
10171                 /* adjustments to start on required 4 byte boundary */
10172                 b_offset = offset & 3;
10173                 b_count = 4 - b_offset;
10174                 if (b_count > len) {
10175                         /* i.e. offset=1 len=2 */
10176                         b_count = len;
10177                 }
10178                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10179                 if (ret)
10180                         return ret;
10181                 memcpy(data, ((char *)&val) + b_offset, b_count);
10182                 len -= b_count;
10183                 offset += b_count;
10184                 eeprom->len += b_count;
10185         }
10186
10187         /* read bytes up to the last 4 byte boundary */
10188         pd = &data[eeprom->len];
10189         for (i = 0; i < (len - (len & 3)); i += 4) {
10190                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10191                 if (ret) {
10192                         eeprom->len += i;
10193                         return ret;
10194                 }
10195                 memcpy(pd + i, &val, 4);
10196         }
10197         eeprom->len += i;
10198
10199         if (len & 3) {
10200                 /* read last bytes not ending on 4 byte boundary */
10201                 pd = &data[eeprom->len];
10202                 b_count = len & 3;
10203                 b_offset = offset + len - b_count;
10204                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10205                 if (ret)
10206                         return ret;
10207                 memcpy(pd, &val, b_count);
10208                 eeprom->len += b_count;
10209         }
10210         return 0;
10211 }
10212
10213 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10214
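/*
 * NVRAM writes must be 4-byte aligned as well; unaligned head or tail
 * bytes are merged with the neighbouring words in a bounce buffer
 * before the block write (read-modify-write).
 */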
10215 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10216 {
10217         struct tg3 *tp = netdev_priv(dev);
10218         int ret;
10219         u32 offset, len, b_offset, odd_len;
10220         u8 *buf;
10221         __be32 start, end;
10222
10223         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10224                 return -EAGAIN;
10225
10226         if (tg3_flag(tp, NO_NVRAM) ||
10227             eeprom->magic != TG3_EEPROM_MAGIC)
10228                 return -EINVAL;
10229
10230         offset = eeprom->offset;
10231         len = eeprom->len;
10232
10233         if ((b_offset = (offset & 3))) {
10234                 /* adjustments to start on required 4 byte boundary */
10235                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10236                 if (ret)
10237                         return ret;
10238                 len += b_offset;
10239                 offset &= ~3;
10240                 if (len < 4)
10241                         len = 4;
10242         }
10243
10244         odd_len = 0;
10245         if (len & 3) {
10246                 /* adjustments to end on required 4 byte boundary */
10247                 odd_len = 1;
10248                 len = (len + 3) & ~3;
10249                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10250                 if (ret)
10251                         return ret;
10252         }
10253
10254         buf = data;
10255         if (b_offset || odd_len) {
10256                 buf = kmalloc(len, GFP_KERNEL);
10257                 if (!buf)
10258                         return -ENOMEM;
10259                 if (b_offset)
10260                         memcpy(buf, &start, 4);
10261                 if (odd_len)
10262                         memcpy(buf+len-4, &end, 4);
10263                 memcpy(buf + b_offset, data, eeprom->len);
10264         }
10265
10266         ret = tg3_nvram_write_block(tp, offset, len, buf);
10267
10268         if (buf != data)
10269                 kfree(buf);
10270
10271         return ret;
10272 }
10273
10274 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10275 {
10276         struct tg3 *tp = netdev_priv(dev);
10277
10278         if (tg3_flag(tp, USE_PHYLIB)) {
10279                 struct phy_device *phydev;
10280                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10281                         return -EAGAIN;
10282                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10283                 return phy_ethtool_gset(phydev, cmd);
10284         }
10285
10286         cmd->supported = (SUPPORTED_Autoneg);
10287
10288         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10289                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10290                                    SUPPORTED_1000baseT_Full);
10291
10292         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10293                 cmd->supported |= (SUPPORTED_100baseT_Half |
10294                                   SUPPORTED_100baseT_Full |
10295                                   SUPPORTED_10baseT_Half |
10296                                   SUPPORTED_10baseT_Full |
10297                                   SUPPORTED_TP);
10298                 cmd->port = PORT_TP;
10299         } else {
10300                 cmd->supported |= SUPPORTED_FIBRE;
10301                 cmd->port = PORT_FIBRE;
10302         }
10303
10304         cmd->advertising = tp->link_config.advertising;
10305         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10306                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10307                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10308                                 cmd->advertising |= ADVERTISED_Pause;
10309                         } else {
10310                                 cmd->advertising |= ADVERTISED_Pause |
10311                                                     ADVERTISED_Asym_Pause;
10312                         }
10313                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10314                         cmd->advertising |= ADVERTISED_Asym_Pause;
10315                 }
10316         }
10317         if (netif_running(dev)) {
10318                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10319                 cmd->duplex = tp->link_config.active_duplex;
10320         } else {
10321                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10322                 cmd->duplex = DUPLEX_INVALID;
10323         }
10324         cmd->phy_address = tp->phy_addr;
10325         cmd->transceiver = XCVR_INTERNAL;
10326         cmd->autoneg = tp->link_config.autoneg;
10327         cmd->maxtxpkt = 0;
10328         cmd->maxrxpkt = 0;
10329         return 0;
10330 }
10331
10332 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10333 {
10334         struct tg3 *tp = netdev_priv(dev);
10335         u32 speed = ethtool_cmd_speed(cmd);
10336
10337         if (tg3_flag(tp, USE_PHYLIB)) {
10338                 struct phy_device *phydev;
10339                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10340                         return -EAGAIN;
10341                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10342                 return phy_ethtool_sset(phydev, cmd);
10343         }
10344
10345         if (cmd->autoneg != AUTONEG_ENABLE &&
10346             cmd->autoneg != AUTONEG_DISABLE)
10347                 return -EINVAL;
10348
10349         if (cmd->autoneg == AUTONEG_DISABLE &&
10350             cmd->duplex != DUPLEX_FULL &&
10351             cmd->duplex != DUPLEX_HALF)
10352                 return -EINVAL;
10353
10354         if (cmd->autoneg == AUTONEG_ENABLE) {
10355                 u32 mask = ADVERTISED_Autoneg |
10356                            ADVERTISED_Pause |
10357                            ADVERTISED_Asym_Pause;
10358
10359                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10360                         mask |= ADVERTISED_1000baseT_Half |
10361                                 ADVERTISED_1000baseT_Full;
10362
10363                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10364                         mask |= ADVERTISED_100baseT_Half |
10365                                 ADVERTISED_100baseT_Full |
10366                                 ADVERTISED_10baseT_Half |
10367                                 ADVERTISED_10baseT_Full |
10368                                 ADVERTISED_TP;
10369                 else
10370                         mask |= ADVERTISED_FIBRE;
10371
10372                 if (cmd->advertising & ~mask)
10373                         return -EINVAL;
10374
10375                 mask &= (ADVERTISED_1000baseT_Half |
10376                          ADVERTISED_1000baseT_Full |
10377                          ADVERTISED_100baseT_Half |
10378                          ADVERTISED_100baseT_Full |
10379                          ADVERTISED_10baseT_Half |
10380                          ADVERTISED_10baseT_Full);
10381
10382                 cmd->advertising &= mask;
10383         } else {
10384                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10385                         if (speed != SPEED_1000)
10386                                 return -EINVAL;
10387
10388                         if (cmd->duplex != DUPLEX_FULL)
10389                                 return -EINVAL;
10390                 } else {
10391                         if (speed != SPEED_100 &&
10392                             speed != SPEED_10)
10393                                 return -EINVAL;
10394                 }
10395         }
10396
10397         tg3_full_lock(tp, 0);
10398
10399         tp->link_config.autoneg = cmd->autoneg;
10400         if (cmd->autoneg == AUTONEG_ENABLE) {
10401                 tp->link_config.advertising = (cmd->advertising |
10402                                               ADVERTISED_Autoneg);
10403                 tp->link_config.speed = SPEED_INVALID;
10404                 tp->link_config.duplex = DUPLEX_INVALID;
10405         } else {
10406                 tp->link_config.advertising = 0;
10407                 tp->link_config.speed = speed;
10408                 tp->link_config.duplex = cmd->duplex;
10409         }
10410
10411         tp->link_config.orig_speed = tp->link_config.speed;
10412         tp->link_config.orig_duplex = tp->link_config.duplex;
10413         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10414
10415         if (netif_running(dev))
10416                 tg3_setup_phy(tp, 1);
10417
10418         tg3_full_unlock(tp);
10419
10420         return 0;
10421 }
10422
10423 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10424 {
10425         struct tg3 *tp = netdev_priv(dev);
10426
10427         strcpy(info->driver, DRV_MODULE_NAME);
10428         strcpy(info->version, DRV_MODULE_VERSION);
10429         strcpy(info->fw_version, tp->fw_ver);
10430         strcpy(info->bus_info, pci_name(tp->pdev));
10431 }
10432
10433 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10434 {
10435         struct tg3 *tp = netdev_priv(dev);
10436
10437         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10438                 wol->supported = WAKE_MAGIC;
10439         else
10440                 wol->supported = 0;
10441         wol->wolopts = 0;
10442         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10443                 wol->wolopts = WAKE_MAGIC;
10444         memset(&wol->sopass, 0, sizeof(wol->sopass));
10445 }
10446
10447 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10448 {
10449         struct tg3 *tp = netdev_priv(dev);
10450         struct device *dp = &tp->pdev->dev;
10451
10452         if (wol->wolopts & ~WAKE_MAGIC)
10453                 return -EINVAL;
10454         if ((wol->wolopts & WAKE_MAGIC) &&
10455             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10456                 return -EINVAL;
10457
10458         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10459
10460         spin_lock_bh(&tp->lock);
10461         if (device_may_wakeup(dp))
10462                 tg3_flag_set(tp, WOL_ENABLE);
10463         else
10464                 tg3_flag_clear(tp, WOL_ENABLE);
10465         spin_unlock_bh(&tp->lock);
10466
10467         return 0;
10468 }
10469
10470 static u32 tg3_get_msglevel(struct net_device *dev)
10471 {
10472         struct tg3 *tp = netdev_priv(dev);
10473         return tp->msg_enable;
10474 }
10475
10476 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10477 {
10478         struct tg3 *tp = netdev_priv(dev);
10479         tp->msg_enable = value;
10480 }
10481
10482 static int tg3_nway_reset(struct net_device *dev)
10483 {
10484         struct tg3 *tp = netdev_priv(dev);
10485         int r;
10486
10487         if (!netif_running(dev))
10488                 return -EAGAIN;
10489
10490         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10491                 return -EINVAL;
10492
10493         if (tg3_flag(tp, USE_PHYLIB)) {
10494                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10495                         return -EAGAIN;
10496                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10497         } else {
10498                 u32 bmcr;
10499
10500                 spin_lock_bh(&tp->lock);
10501                 r = -EINVAL;
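                /* The first BMCR read is discarded (assumption: a dummy
                 * read to flush a stale value); only the second result
                 * is used.
                 */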
10502                 tg3_readphy(tp, MII_BMCR, &bmcr);
10503                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10504                     ((bmcr & BMCR_ANENABLE) ||
10505                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10506                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10507                                                    BMCR_ANENABLE);
10508                         r = 0;
10509                 }
10510                 spin_unlock_bh(&tp->lock);
10511         }
10512
10513         return r;
10514 }
10515
10516 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10517 {
10518         struct tg3 *tp = netdev_priv(dev);
10519
10520         ering->rx_max_pending = tp->rx_std_ring_mask;
10521         ering->rx_mini_max_pending = 0;
10522         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10523                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10524         else
10525                 ering->rx_jumbo_max_pending = 0;
10526
10527         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10528
10529         ering->rx_pending = tp->rx_pending;
10530         ering->rx_mini_pending = 0;
10531         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10532                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10533         else
10534                 ering->rx_jumbo_pending = 0;
10535
10536         ering->tx_pending = tp->napi[0].tx_pending;
10537 }
10538
10539 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10540 {
10541         struct tg3 *tp = netdev_priv(dev);
10542         int i, irq_sync = 0, err = 0;
10543
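        /*
         * Bound the requested sizes: each RX ring has a hardware limit,
         * and the TX ring must hold more descriptors than a maximally
         * fragmented skb (three times as many with the TSO workaround).
         */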
10544         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10545             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10546             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10547             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10548             (tg3_flag(tp, TSO_BUG) &&
10549              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10550                 return -EINVAL;
10551
10552         if (netif_running(dev)) {
10553                 tg3_phy_stop(tp);
10554                 tg3_netif_stop(tp);
10555                 irq_sync = 1;
10556         }
10557
10558         tg3_full_lock(tp, irq_sync);
10559
10560         tp->rx_pending = ering->rx_pending;
10561
10562         if (tg3_flag(tp, MAX_RXPEND_64) &&
10563             tp->rx_pending > 63)
10564                 tp->rx_pending = 63;
10565         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10566
10567         for (i = 0; i < tp->irq_max; i++)
10568                 tp->napi[i].tx_pending = ering->tx_pending;
10569
10570         if (netif_running(dev)) {
10571                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10572                 err = tg3_restart_hw(tp, 1);
10573                 if (!err)
10574                         tg3_netif_start(tp);
10575         }
10576
10577         tg3_full_unlock(tp);
10578
10579         if (irq_sync && !err)
10580                 tg3_phy_start(tp);
10581
10582         return err;
10583 }
10584
10585 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10586 {
10587         struct tg3 *tp = netdev_priv(dev);
10588
10589         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10590
10591         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10592                 epause->rx_pause = 1;
10593         else
10594                 epause->rx_pause = 0;
10595
10596         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10597                 epause->tx_pause = 1;
10598         else
10599                 epause->tx_pause = 0;
10600 }
10601
10602 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10603 {
10604         struct tg3 *tp = netdev_priv(dev);
10605         int err = 0;
10606
10607         if (tg3_flag(tp, USE_PHYLIB)) {
10608                 u32 newadv;
10609                 struct phy_device *phydev;
10610
10611                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10612
10613                 if (!(phydev->supported & SUPPORTED_Pause) ||
10614                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10615                      (epause->rx_pause != epause->tx_pause)))
10616                         return -EINVAL;
10617
10618                 tp->link_config.flowctrl = 0;
10619                 if (epause->rx_pause) {
10620                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10621
10622                         if (epause->tx_pause) {
10623                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10624                                 newadv = ADVERTISED_Pause;
10625                         } else
10626                                 newadv = ADVERTISED_Pause |
10627                                          ADVERTISED_Asym_Pause;
10628                 } else if (epause->tx_pause) {
10629                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10630                         newadv = ADVERTISED_Asym_Pause;
10631                 } else
10632                         newadv = 0;
10633
10634                 if (epause->autoneg)
10635                         tg3_flag_set(tp, PAUSE_AUTONEG);
10636                 else
10637                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10638
10639                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10640                         u32 oldadv = phydev->advertising &
10641                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10642                         if (oldadv != newadv) {
10643                                 phydev->advertising &=
10644                                         ~(ADVERTISED_Pause |
10645                                           ADVERTISED_Asym_Pause);
10646                                 phydev->advertising |= newadv;
10647                                 if (phydev->autoneg) {
10648                                         /*
10649                                          * Always renegotiate the link to
10650                                          * inform our link partner of our
10651                                          * flow control settings, even if the
10652                                          * flow control is forced.  Let
10653                                          * tg3_adjust_link() do the final
10654                                          * flow control setup.
10655                                          */
10656                                         return phy_start_aneg(phydev);
10657                                 }
10658                         }
10659
10660                         if (!epause->autoneg)
10661                                 tg3_setup_flow_control(tp, 0, 0);
10662                 } else {
10663                         tp->link_config.orig_advertising &=
10664                                         ~(ADVERTISED_Pause |
10665                                           ADVERTISED_Asym_Pause);
10666                         tp->link_config.orig_advertising |= newadv;
10667                 }
10668         } else {
10669                 int irq_sync = 0;
10670
10671                 if (netif_running(dev)) {
10672                         tg3_netif_stop(tp);
10673                         irq_sync = 1;
10674                 }
10675
10676                 tg3_full_lock(tp, irq_sync);
10677
10678                 if (epause->autoneg)
10679                         tg3_flag_set(tp, PAUSE_AUTONEG);
10680                 else
10681                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10682                 if (epause->rx_pause)
10683                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10684                 else
10685                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10686                 if (epause->tx_pause)
10687                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10688                 else
10689                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10690
10691                 if (netif_running(dev)) {
10692                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10693                         err = tg3_restart_hw(tp, 1);
10694                         if (!err)
10695                                 tg3_netif_start(tp);
10696                 }
10697
10698                 tg3_full_unlock(tp);
10699         }
10700
10701         return err;
10702 }
10703
10704 static int tg3_get_sset_count(struct net_device *dev, int sset)
10705 {
10706         switch (sset) {
10707         case ETH_SS_TEST:
10708                 return TG3_NUM_TEST;
10709         case ETH_SS_STATS:
10710                 return TG3_NUM_STATS;
10711         default:
10712                 return -EOPNOTSUPP;
10713         }
10714 }
10715
10716 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10717 {
10718         switch (stringset) {
10719         case ETH_SS_STATS:
10720                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10721                 break;
10722         case ETH_SS_TEST:
10723                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10724                 break;
10725         default:
10726                 WARN_ON(1);     /* we need a WARN() */
10727                 break;
10728         }
10729 }
10730
10731 static int tg3_set_phys_id(struct net_device *dev,
10732                             enum ethtool_phys_id_state state)
10733 {
10734         struct tg3 *tp = netdev_priv(dev);
10735
10736         if (!netif_running(tp->dev))
10737                 return -EAGAIN;
10738
10739         switch (state) {
10740         case ETHTOOL_ID_ACTIVE:
10741                 return 1;       /* cycle on/off once per second */
10742
10743         case ETHTOOL_ID_ON:
10744                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10745                      LED_CTRL_1000MBPS_ON |
10746                      LED_CTRL_100MBPS_ON |
10747                      LED_CTRL_10MBPS_ON |
10748                      LED_CTRL_TRAFFIC_OVERRIDE |
10749                      LED_CTRL_TRAFFIC_BLINK |
10750                      LED_CTRL_TRAFFIC_LED);
10751                 break;
10752
10753         case ETHTOOL_ID_OFF:
10754                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10755                      LED_CTRL_TRAFFIC_OVERRIDE);
10756                 break;
10757
10758         case ETHTOOL_ID_INACTIVE:
10759                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10760                 break;
10761         }
10762
10763         return 0;
10764 }
10765
10766 static void tg3_get_ethtool_stats(struct net_device *dev,
10767                                    struct ethtool_stats *estats, u64 *tmp_stats)
10768 {
10769         struct tg3 *tp = netdev_priv(dev);
10770         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10771 }
10772
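/*
 * Return a buffer holding the VPD block.  If the NVRAM directory has an
 * entry pointing at extended VPD data, read it from there; otherwise
 * fall back to the fixed legacy offset.  Parts whose NVRAM magic is not
 * recognized are read through pci_read_vpd() instead.
 */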
10773 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10774 {
10775         int i;
10776         __be32 *buf;
10777         u32 offset = 0, len = 0;
10778         u32 magic, val;
10779
10780         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10781                 return NULL;
10782
10783         if (magic == TG3_EEPROM_MAGIC) {
10784                 for (offset = TG3_NVM_DIR_START;
10785                      offset < TG3_NVM_DIR_END;
10786                      offset += TG3_NVM_DIRENT_SIZE) {
10787                         if (tg3_nvram_read(tp, offset, &val))
10788                                 return NULL;
10789
10790                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10791                             TG3_NVM_DIRTYPE_EXTVPD)
10792                                 break;
10793                 }
10794
10795                 if (offset != TG3_NVM_DIR_END) {
10796                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10797                         if (tg3_nvram_read(tp, offset + 4, &offset))
10798                                 return NULL;
10799
10800                         offset = tg3_nvram_logical_addr(tp, offset);
10801                 }
10802         }
10803
10804         if (!offset || !len) {
10805                 offset = TG3_NVM_VPD_OFF;
10806                 len = TG3_NVM_VPD_LEN;
10807         }
10808
10809         buf = kmalloc(len, GFP_KERNEL);
10810         if (buf == NULL)
10811                 return NULL;
10812
10813         if (magic == TG3_EEPROM_MAGIC) {
10814                 for (i = 0; i < len; i += 4) {
10815                         /* The data is in little-endian format in NVRAM.
10816                          * Use the big-endian read routines to preserve
10817                          * the byte order as it exists in NVRAM.
10818                          */
10819                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10820                                 goto error;
10821                 }
10822         } else {
10823                 u8 *ptr;
10824                 ssize_t cnt;
10825                 unsigned int pos = 0;
10826
10827                 ptr = (u8 *)&buf[0];
10828                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10829                         cnt = pci_read_vpd(tp->pdev, pos,
10830                                            len - pos, ptr);
10831                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10832                                 cnt = 0;
10833                         else if (cnt < 0)
10834                                 goto error;
10835                 }
10836                 if (pos != len)
10837                         goto error;
10838         }
10839
10840         *vpdlen = len;
10841
10842         return buf;
10843
10844 error:
10845         kfree(buf);
10846         return NULL;
10847 }
10848
10849 #define NVRAM_TEST_SIZE 0x100
10850 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10851 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10852 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10853 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10854 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10855 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10856 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10857 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10858
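/*
 * Validate the NVRAM contents.  Legacy images carry CRC-32 checksums
 * over the bootstrap and manufacturing blocks plus an optional VPD
 * checksum keyword; selfboot images use a simple byte sum (format 1)
 * or per-byte parity bits (hardware format).
 */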
10859 static int tg3_test_nvram(struct tg3 *tp)
10860 {
10861         u32 csum, magic, len;
10862         __be32 *buf;
10863         int i, j, k, err = 0, size;
10864
10865         if (tg3_flag(tp, NO_NVRAM))
10866                 return 0;
10867
10868         if (tg3_nvram_read(tp, 0, &magic) != 0)
10869                 return -EIO;
10870
10871         if (magic == TG3_EEPROM_MAGIC)
10872                 size = NVRAM_TEST_SIZE;
10873         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10874                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10875                     TG3_EEPROM_SB_FORMAT_1) {
10876                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10877                         case TG3_EEPROM_SB_REVISION_0:
10878                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10879                                 break;
10880                         case TG3_EEPROM_SB_REVISION_2:
10881                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10882                                 break;
10883                         case TG3_EEPROM_SB_REVISION_3:
10884                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10885                                 break;
10886                         case TG3_EEPROM_SB_REVISION_4:
10887                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10888                                 break;
10889                         case TG3_EEPROM_SB_REVISION_5:
10890                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10891                                 break;
10892                         case TG3_EEPROM_SB_REVISION_6:
10893                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10894                                 break;
10895                         default:
10896                                 return -EIO;
10897                         }
10898                 } else
10899                         return 0;
10900         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10901                 size = NVRAM_SELFBOOT_HW_SIZE;
10902         else
10903                 return -EIO;
10904
10905         buf = kmalloc(size, GFP_KERNEL);
10906         if (buf == NULL)
10907                 return -ENOMEM;
10908
10909         err = -EIO;
10910         for (i = 0, j = 0; i < size; i += 4, j++) {
10911                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10912                 if (err)
10913                         break;
10914         }
10915         if (i < size)
10916                 goto out;
10917
10918         /* Selfboot format */
10919         magic = be32_to_cpu(buf[0]);
10920         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10921             TG3_EEPROM_MAGIC_FW) {
10922                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10923
10924                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10925                     TG3_EEPROM_SB_REVISION_2) {
10926                         /* For rev 2, the csum doesn't include the MBA. */
10927                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10928                                 csum8 += buf8[i];
10929                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10930                                 csum8 += buf8[i];
10931                 } else {
10932                         for (i = 0; i < size; i++)
10933                                 csum8 += buf8[i];
10934                 }
10935
10936                 if (csum8 == 0) {
10937                         err = 0;
10938                         goto out;
10939                 }
10940
10941                 err = -EIO;
10942                 goto out;
10943         }
10944
10945         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10946             TG3_EEPROM_MAGIC_HW) {
10947                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10948                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10949                 u8 *buf8 = (u8 *) buf;
10950
10951                 /* Separate the parity bits and the data bytes.  */
10952                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10953                         if ((i == 0) || (i == 8)) {
10954                                 int l;
10955                                 u8 msk;
10956
10957                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10958                                         parity[k++] = buf8[i] & msk;
10959                                 i++;
10960                         } else if (i == 16) {
10961                                 int l;
10962                                 u8 msk;
10963
10964                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10965                                         parity[k++] = buf8[i] & msk;
10966                                 i++;
10967
10968                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10969                                         parity[k++] = buf8[i] & msk;
10970                                 i++;
10971                         }
10972                         data[j++] = buf8[i];
10973                 }
10974
10975                 err = -EIO;
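                /*
                 * Each data byte plus its stashed parity bit must have
                 * odd combined weight; anything else marks the image
                 * bad.
                 */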
10976                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10977                         u8 hw8 = hweight8(data[i]);
10978
10979                         if ((hw8 & 0x1) && parity[i])
10980                                 goto out;
10981                         else if (!(hw8 & 0x1) && !parity[i])
10982                                 goto out;
10983                 }
10984                 err = 0;
10985                 goto out;
10986         }
10987
10988         err = -EIO;
10989
10990         /* Bootstrap checksum at offset 0x10 */
10991         csum = calc_crc((unsigned char *) buf, 0x10);
10992         if (csum != le32_to_cpu(buf[0x10/4]))
10993                 goto out;
10994
10995         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10996         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10997         if (csum != le32_to_cpu(buf[0xfc/4]))
10998                 goto out;
10999
11000         kfree(buf);
11001
11002         buf = tg3_vpd_readblock(tp, &len);
11003         if (!buf)
11004                 return -ENOMEM;
11005
11006         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11007         if (i > 0) {
11008                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11009                 if (j < 0)
11010                         goto out;
11011
11012                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11013                         goto out;
11014
11015                 i += PCI_VPD_LRDT_TAG_SIZE;
11016                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11017                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11018                 if (j > 0) {
11019                         u8 csum8 = 0;
11020
11021                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11022
11023                         for (i = 0; i <= j; i++)
11024                                 csum8 += ((u8 *)buf)[i];
11025
11026                         if (csum8)
11027                                 goto out;
11028                 }
11029         }
11030
11031         err = 0;
11032
11033 out:
11034         kfree(buf);
11035         return err;
11036 }
11037
11038 #define TG3_SERDES_TIMEOUT_SEC  2
11039 #define TG3_COPPER_TIMEOUT_SEC  6
11040
11041 static int tg3_test_link(struct tg3 *tp)
11042 {
11043         int i, max;
11044
11045         if (!netif_running(tp->dev))
11046                 return -ENODEV;
11047
11048         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11049                 max = TG3_SERDES_TIMEOUT_SEC;
11050         else
11051                 max = TG3_COPPER_TIMEOUT_SEC;
11052
11053         for (i = 0; i < max; i++) {
11054                 if (netif_carrier_ok(tp->dev))
11055                         return 0;
11056
11057                 if (msleep_interruptible(1000))
11058                         break;
11059         }
11060
11061         return -EIO;
11062 }
11063
11064 /* Only test the commonly used registers */
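/*
 * Each table entry pairs a read_mask with a write_mask (assumption:
 * read_mask covers the bits expected to read back as given, write_mask
 * the bits the test may safely flip and verify); entries are matched to
 * chips via the TG3_FL_* flags.
 */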
11065 static int tg3_test_registers(struct tg3 *tp)
11066 {
11067         int i, is_5705, is_5750;
11068         u32 offset, read_mask, write_mask, val, save_val, read_val;
11069         static struct {
11070                 u16 offset;
11071                 u16 flags;
11072 #define TG3_FL_5705     0x1
11073 #define TG3_FL_NOT_5705 0x2
11074 #define TG3_FL_NOT_5788 0x4
11075 #define TG3_FL_NOT_5750 0x8
11076                 u32 read_mask;
11077                 u32 write_mask;
11078         } reg_tbl[] = {
11079                 /* MAC Control Registers */
11080                 { MAC_MODE, TG3_FL_NOT_5705,
11081                         0x00000000, 0x00ef6f8c },
11082                 { MAC_MODE, TG3_FL_5705,
11083                         0x00000000, 0x01ef6b8c },
11084                 { MAC_STATUS, TG3_FL_NOT_5705,
11085                         0x03800107, 0x00000000 },
11086                 { MAC_STATUS, TG3_FL_5705,
11087                         0x03800100, 0x00000000 },
11088                 { MAC_ADDR_0_HIGH, 0x0000,
11089                         0x00000000, 0x0000ffff },
11090                 { MAC_ADDR_0_LOW, 0x0000,
11091                         0x00000000, 0xffffffff },
11092                 { MAC_RX_MTU_SIZE, 0x0000,
11093                         0x00000000, 0x0000ffff },
11094                 { MAC_TX_MODE, 0x0000,
11095                         0x00000000, 0x00000070 },
11096                 { MAC_TX_LENGTHS, 0x0000,
11097                         0x00000000, 0x00003fff },
11098                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11099                         0x00000000, 0x000007fc },
11100                 { MAC_RX_MODE, TG3_FL_5705,
11101                         0x00000000, 0x000007dc },
11102                 { MAC_HASH_REG_0, 0x0000,
11103                         0x00000000, 0xffffffff },
11104                 { MAC_HASH_REG_1, 0x0000,
11105                         0x00000000, 0xffffffff },
11106                 { MAC_HASH_REG_2, 0x0000,
11107                         0x00000000, 0xffffffff },
11108                 { MAC_HASH_REG_3, 0x0000,
11109                         0x00000000, 0xffffffff },
11110
11111                 /* Receive Data and Receive BD Initiator Control Registers. */
11112                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11113                         0x00000000, 0xffffffff },
11114                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11115                         0x00000000, 0xffffffff },
11116                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11117                         0x00000000, 0x00000003 },
11118                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11119                         0x00000000, 0xffffffff },
11120                 { RCVDBDI_STD_BD+0, 0x0000,
11121                         0x00000000, 0xffffffff },
11122                 { RCVDBDI_STD_BD+4, 0x0000,
11123                         0x00000000, 0xffffffff },
11124                 { RCVDBDI_STD_BD+8, 0x0000,
11125                         0x00000000, 0xffff0002 },
11126                 { RCVDBDI_STD_BD+0xc, 0x0000,
11127                         0x00000000, 0xffffffff },
11128
11129                 /* Receive BD Initiator Control Registers. */
11130                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11131                         0x00000000, 0xffffffff },
11132                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11133                         0x00000000, 0x000003ff },
11134                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11135                         0x00000000, 0xffffffff },
11136
11137                 /* Host Coalescing Control Registers. */
11138                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11139                         0x00000000, 0x00000004 },
11140                 { HOSTCC_MODE, TG3_FL_5705,
11141                         0x00000000, 0x000000f6 },
11142                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11143                         0x00000000, 0xffffffff },
11144                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11145                         0x00000000, 0x000003ff },
11146                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11147                         0x00000000, 0xffffffff },
11148                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11149                         0x00000000, 0x000003ff },
11150                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11151                         0x00000000, 0xffffffff },
11152                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11153                         0x00000000, 0x000000ff },
11154                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11155                         0x00000000, 0xffffffff },
11156                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11157                         0x00000000, 0x000000ff },
11158                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11159                         0x00000000, 0xffffffff },
11160                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11161                         0x00000000, 0xffffffff },
11162                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11163                         0x00000000, 0xffffffff },
11164                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11165                         0x00000000, 0x000000ff },
11166                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11167                         0x00000000, 0xffffffff },
11168                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11169                         0x00000000, 0x000000ff },
11170                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11171                         0x00000000, 0xffffffff },
11172                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11173                         0x00000000, 0xffffffff },
11174                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11175                         0x00000000, 0xffffffff },
11176                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11177                         0x00000000, 0xffffffff },
11178                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11179                         0x00000000, 0xffffffff },
11180                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11181                         0xffffffff, 0x00000000 },
11182                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11183                         0xffffffff, 0x00000000 },
11184
11185                 /* Buffer Manager Control Registers. */
11186                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11187                         0x00000000, 0x007fff80 },
11188                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11189                         0x00000000, 0x007fffff },
11190                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11191                         0x00000000, 0x0000003f },
11192                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11193                         0x00000000, 0x000001ff },
11194                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11195                         0x00000000, 0x000001ff },
11196                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11197                         0xffffffff, 0x00000000 },
11198                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11199                         0xffffffff, 0x00000000 },
11200
11201                 /* Mailbox Registers */
11202                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11203                         0x00000000, 0x000001ff },
11204                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11205                         0x00000000, 0x000001ff },
11206                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11207                         0x00000000, 0x000007ff },
11208                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11209                         0x00000000, 0x000001ff },
11210
11211                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11212         };
11213
11214         is_5705 = is_5750 = 0;
11215         if (tg3_flag(tp, 5705_PLUS)) {
11216                 is_5705 = 1;
11217                 if (tg3_flag(tp, 5750_PLUS))
11218                         is_5750 = 1;
11219         }
11220
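        /* Walk the table: read_mask marks bits that must behave as
         * read-only (they must keep their saved value across any write),
         * while write_mask marks bits that must be fully read/write.
         * Entries whose flags do not match this chip generation are
         * skipped.
         */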
11221         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11222                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11223                         continue;
11224
11225                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11226                         continue;
11227
11228                 if (tg3_flag(tp, IS_5788) &&
11229                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11230                         continue;
11231
11232                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11233                         continue;
11234
11235                 offset = (u32) reg_tbl[i].offset;
11236                 read_mask = reg_tbl[i].read_mask;
11237                 write_mask = reg_tbl[i].write_mask;
11238
11239                 /* Save the original register content */
11240                 save_val = tr32(offset);
11241
11242                 /* Determine the read-only value. */
11243                 read_val = save_val & read_mask;
11244
11245                 /* Write zero to the register, then make sure the read-only bits
11246                  * are not changed and the read/write bits are all zeros.
11247                  */
11248                 tw32(offset, 0);
11249
11250                 val = tr32(offset);
11251
11252                 /* Test the read-only and read/write bits. */
11253                 if (((val & read_mask) != read_val) || (val & write_mask))
11254                         goto out;
11255
11256                 /* Write ones to all the bits defined by RdMask and WrMask, then
11257                  * make sure the read-only bits are not changed and the
11258                  * read/write bits are all ones.
11259                  */
11260                 tw32(offset, read_mask | write_mask);
11261
11262                 val = tr32(offset);
11263
11264                 /* Test the read-only bits. */
11265                 if ((val & read_mask) != read_val)
11266                         goto out;
11267
11268                 /* Test the read/write bits. */
11269                 if ((val & write_mask) != write_mask)
11270                         goto out;
11271
11272                 tw32(offset, save_val);
11273         }
11274
11275         return 0;
11276
11277 out:
11278         if (netif_msg_hw(tp))
11279                 netdev_err(tp->dev,
11280                            "Register test failed at offset %x\n", offset);
11281         tw32(offset, save_val);
11282         return -EIO;
11283 }
11284
11285 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11286 {
11287         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11288         int i;
11289         u32 j;
11290
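        /* Write then read back every word with three patterns: all
         * zeros, all ones, and an alternating value, which together
         * should catch stuck-at bits and simple data-line shorts.
         */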
11291         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11292                 for (j = 0; j < len; j += 4) {
11293                         u32 val;
11294
11295                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11296                         tg3_read_mem(tp, offset + j, &val);
11297                         if (val != test_pattern[i])
11298                                 return -EIO;
11299                 }
11300         }
11301         return 0;
11302 }
11303
11304 static int tg3_test_memory(struct tg3 *tp)
11305 {
11306         static struct mem_entry {
11307                 u32 offset;
11308                 u32 len;
11309         } mem_tbl_570x[] = {
11310                 { 0x00000000, 0x00b50},
11311                 { 0x00002000, 0x1c000},
11312                 { 0xffffffff, 0x00000}
11313         }, mem_tbl_5705[] = {
11314                 { 0x00000100, 0x0000c},
11315                 { 0x00000200, 0x00008},
11316                 { 0x00004000, 0x00800},
11317                 { 0x00006000, 0x01000},
11318                 { 0x00008000, 0x02000},
11319                 { 0x00010000, 0x0e000},
11320                 { 0xffffffff, 0x00000}
11321         }, mem_tbl_5755[] = {
11322                 { 0x00000200, 0x00008},
11323                 { 0x00004000, 0x00800},
11324                 { 0x00006000, 0x00800},
11325                 { 0x00008000, 0x02000},
11326                 { 0x00010000, 0x0c000},
11327                 { 0xffffffff, 0x00000}
11328         }, mem_tbl_5906[] = {
11329                 { 0x00000200, 0x00008},
11330                 { 0x00004000, 0x00400},
11331                 { 0x00006000, 0x00400},
11332                 { 0x00008000, 0x01000},
11333                 { 0x00010000, 0x01000},
11334                 { 0xffffffff, 0x00000}
11335         }, mem_tbl_5717[] = {
11336                 { 0x00000200, 0x00008},
11337                 { 0x00010000, 0x0a000},
11338                 { 0x00020000, 0x13c00},
11339                 { 0xffffffff, 0x00000}
11340         }, mem_tbl_57765[] = {
11341                 { 0x00000200, 0x00008},
11342                 { 0x00004000, 0x00800},
11343                 { 0x00006000, 0x09800},
11344                 { 0x00010000, 0x0a000},
11345                 { 0xffffffff, 0x00000}
11346         };
11347         struct mem_entry *mem_tbl;
11348         int err = 0;
11349         int i;
11350
11351         if (tg3_flag(tp, 5717_PLUS))
11352                 mem_tbl = mem_tbl_5717;
11353         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11354                 mem_tbl = mem_tbl_57765;
11355         else if (tg3_flag(tp, 5755_PLUS))
11356                 mem_tbl = mem_tbl_5755;
11357         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11358                 mem_tbl = mem_tbl_5906;
11359         else if (tg3_flag(tp, 5705_PLUS))
11360                 mem_tbl = mem_tbl_5705;
11361         else
11362                 mem_tbl = mem_tbl_570x;
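        /* Note the ordering above: the *_PLUS flags are cumulative (a
         * 5717 also carries 5755_PLUS, for instance), so the most
         * specific table has to be selected first.
         */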
11363
11364         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11365                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11366                 if (err)
11367                         break;
11368         }
11369
11370         return err;
11371 }
11372
11373 #define TG3_TSO_MSS             500
11374
11375 #define TG3_TSO_IP_HDR_LEN      20
11376 #define TG3_TSO_TCP_HDR_LEN     20
11377 #define TG3_TSO_TCP_OPT_LEN     12
11378
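/* Canned TSO test frame, minus the MAC addresses: ethertype 0x0800
 * (IPv4), a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, protocol TCP,
 * tot_len patched in at run time), and a 20-byte TCP header followed
 * by 12 bytes of options (two NOPs plus a timestamp), matching the
 * TG3_TSO_*_LEN constants above.
 */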
11379 static const u8 tg3_tso_header[] = {
11380 0x08, 0x00,
11381 0x45, 0x00, 0x00, 0x00,
11382 0x00, 0x00, 0x40, 0x00,
11383 0x40, 0x06, 0x00, 0x00,
11384 0x0a, 0x00, 0x00, 0x01,
11385 0x0a, 0x00, 0x00, 0x02,
11386 0x0d, 0x00, 0xe0, 0x00,
11387 0x00, 0x00, 0x01, 0x00,
11388 0x00, 0x00, 0x02, 0x00,
11389 0x80, 0x10, 0x10, 0x00,
11390 0x14, 0x09, 0x00, 0x00,
11391 0x01, 0x01, 0x08, 0x0a,
11392 0x11, 0x11, 0x11, 0x11,
11393 0x11, 0x11, 0x11, 0x11,
11394 };
11395
11396 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11397 {
11398         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11399         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11400         u32 budget;
11401         struct sk_buff *skb, *rx_skb;
11402         u8 *tx_data;
11403         dma_addr_t map;
11404         int num_pkts, tx_len, rx_len, i, err;
11405         struct tg3_rx_buffer_desc *desc;
11406         struct tg3_napi *tnapi, *rnapi;
11407         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11408
11409         tnapi = &tp->napi[0];
11410         rnapi = &tp->napi[0];
11411         if (tp->irq_cnt > 1) {
11412                 if (tg3_flag(tp, ENABLE_RSS))
11413                         rnapi = &tp->napi[1];
11414                 if (tg3_flag(tp, ENABLE_TSS))
11415                         tnapi = &tp->napi[1];
11416         }
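        /* Under RSS the first rx return ring is serviced by vector 1
         * (napi[0] takes link and default events), and TSS likewise
         * places the first tx ring on vector 1.
         */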
11417         coal_now = tnapi->coal_now | rnapi->coal_now;
11418
11419         err = -EIO;
11420
11421         tx_len = pktsz;
11422         skb = netdev_alloc_skb(tp->dev, tx_len);
11423         if (!skb)
11424                 return -ENOMEM;
11425
11426         tx_data = skb_put(skb, tx_len);
11427         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
11428         memset(tx_data + ETH_ALEN, 0x0, ETH_ALEN + 2); /* zero src addr + ethertype */
11429
11430         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11431
11432         if (tso_loopback) {
11433                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11434
11435                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11436                               TG3_TSO_TCP_OPT_LEN;
11437
11438                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11439                        sizeof(tg3_tso_header));
11440                 mss = TG3_TSO_MSS;
11441
11442                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11443                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11444
11445                 /* Set the total length field in the IP header */
11446                 iph->tot_len = htons((u16)(mss + hdr_len));
11447
11448                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11449                               TXD_FLAG_CPU_POST_DMA);
11450
11451                 if (tg3_flag(tp, HW_TSO_1) ||
11452                     tg3_flag(tp, HW_TSO_2) ||
11453                     tg3_flag(tp, HW_TSO_3)) {
11454                         struct tcphdr *th;
11455                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11456                         th = (struct tcphdr *)&tx_data[val];
11457                         th->check = 0;
11458                 } else
11459                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11460
11461                 if (tg3_flag(tp, HW_TSO_3)) {
11462                         mss |= (hdr_len & 0xc) << 12;
11463                         if (hdr_len & 0x10)
11464                                 base_flags |= 0x00000010;
11465                         base_flags |= (hdr_len & 0x3e0) << 5;
11466                 } else if (tg3_flag(tp, HW_TSO_2))
11467                         mss |= hdr_len << 9;
11468                 else if (tg3_flag(tp, HW_TSO_1) ||
11469                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11470                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11471                 } else {
11472                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11473                 }
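                /* The TSO generations disagree on where the total header
                 * length is carried: HW_TSO_3 splits it between the mss
                 * field and base_flags, HW_TSO_2 packs it into mss bits
                 * 9 and up, and older parts encode only the TCP option
                 * length.
                 */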
11474
11475                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11476         } else {
11477                 num_pkts = 1;
11478                 data_off = ETH_HLEN;
11479         }
11480
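        /* Fill the payload with a repeating 0x00..0xff ramp; the
         * receive path below verifies the same sequence byte by byte.
         */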
11481         for (i = data_off; i < tx_len; i++)
11482                 tx_data[i] = (u8) (i & 0xff);
11483
11484         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11485         if (pci_dma_mapping_error(tp->pdev, map)) {
11486                 dev_kfree_skb(skb);
11487                 return -EIO;
11488         }
11489
11490         val = tnapi->tx_prod;
11491         tnapi->tx_buffers[val].skb = skb;
11492         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11493
11494         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11495                rnapi->coal_now);
11496
11497         udelay(10);
11498
11499         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11500
11501         budget = tg3_tx_avail(tnapi);
11502         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11503                             base_flags | TXD_FLAG_END, mss, 0)) {
11504                 tnapi->tx_buffers[val].skb = NULL;
11505                 dev_kfree_skb(skb);
11506                 return -EIO;
11507         }
11508
11509         tnapi->tx_prod++;
11510
11511         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11512         tr32_mailbox(tnapi->prodmbox);
11513
11514         udelay(10);
11515
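        /* Poll for the tx consumer and rx producer indices to advance.
         * Writing the coal_now bits into HOSTCC_MODE forces an immediate
         * coalescing pass, so the status block is refreshed each trip.
         */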
11516         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11517         for (i = 0; i < 35; i++) {
11518                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11519                        coal_now);
11520
11521                 udelay(10);
11522
11523                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11524                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11525                 if ((tx_idx == tnapi->tx_prod) &&
11526                     (rx_idx == (rx_start_idx + num_pkts)))
11527                         break;
11528         }
11529
11530         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11531         dev_kfree_skb(skb);
11532
11533         if (tx_idx != tnapi->tx_prod)
11534                 goto out;
11535
11536         if (rx_idx != rx_start_idx + num_pkts)
11537                 goto out;
11538
11539         val = data_off;
11540         while (rx_idx != rx_start_idx) {
11541                 desc = &rnapi->rx_rcb[rx_start_idx++];
11542                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11543                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11544
11545                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11546                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11547                         goto out;
11548
11549                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11550                          - ETH_FCS_LEN;
11551
11552                 if (!tso_loopback) {
11553                         if (rx_len != tx_len)
11554                                 goto out;
11555
11556                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11557                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11558                                         goto out;
11559                         } else {
11560                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11561                                         goto out;
11562                         }
11563                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11564                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11565                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11566                         goto out;
11567                 }
11568
11569                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11570                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11571                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11572                                              mapping);
11573                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11574                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11575                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11576                                              mapping);
11577                 } else
11578                         goto out;
11579
11580                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11581                                             PCI_DMA_FROMDEVICE);
11582
11583                 for (i = data_off; i < rx_len; i++, val++) {
11584                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11585                                 goto out;
11586                 }
11587         }
11588
11589         err = 0;
11590
11591         /* tg3_free_rings will unmap and free the rx_skb */
11592 out:
11593         return err;
11594 }
11595
11596 #define TG3_STD_LOOPBACK_FAILED         1
11597 #define TG3_JMB_LOOPBACK_FAILED         2
11598 #define TG3_TSO_LOOPBACK_FAILED         4
11599 #define TG3_LOOPBACK_FAILED \
11600         (TG3_STD_LOOPBACK_FAILED | \
11601          TG3_JMB_LOOPBACK_FAILED | \
11602          TG3_TSO_LOOPBACK_FAILED)
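/* tg3_test_loopback() fills one failure bitmask per mode: data[0] for
 * MAC loopback, data[1] for internal PHY loopback, and data[2] for
 * external loopback when it is requested.
 */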
11603
11604 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11605 {
11606         int err = -EIO;
11607         u32 eee_cap;
11608
11609         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11610         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11611
11612         if (!netif_running(tp->dev)) {
11613                 data[0] = TG3_LOOPBACK_FAILED;
11614                 data[1] = TG3_LOOPBACK_FAILED;
11615                 if (do_extlpbk)
11616                         data[2] = TG3_LOOPBACK_FAILED;
11617                 goto done;
11618         }
11619
11620         err = tg3_reset_hw(tp, 1);
11621         if (err) {
11622                 data[0] = TG3_LOOPBACK_FAILED;
11623                 data[1] = TG3_LOOPBACK_FAILED;
11624                 if (do_extlpbk)
11625                         data[2] = TG3_LOOPBACK_FAILED;
11626                 goto done;
11627         }
11628
11629         if (tg3_flag(tp, ENABLE_RSS)) {
11630                 int i;
11631
11632                 /* Reroute all rx packets to the 1st queue */
11633                 for (i = MAC_RSS_INDIR_TBL_0;
11634                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11635                         tw32(i, 0x0);
11636         }
11637
11638         /* HW erratum: MAC loopback fails in some cases on the 5780.
11639          * Normal traffic and PHY loopback are not affected by this
11640          * erratum.  Also, the MAC loopback test is deprecated for
11641          * all newer ASIC revisions.
11642          */
11643         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11644             !tg3_flag(tp, CPMU_PRESENT)) {
11645                 tg3_mac_loopback(tp, true);
11646
11647                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11648                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11649
11650                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11651                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11652                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11653
11654                 tg3_mac_loopback(tp, false);
11655         }
11656
11657         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11658             !tg3_flag(tp, USE_PHYLIB)) {
11659                 int i;
11660
11661                 tg3_phy_lpbk_set(tp, 0, false);
11662
11663                 /* Wait for link */
11664                 for (i = 0; i < 100; i++) {
11665                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11666                                 break;
11667                         mdelay(1);
11668                 }
11669
11670                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11671                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11672                 if (tg3_flag(tp, TSO_CAPABLE) &&
11673                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11674                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11675                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11676                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11677                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11678
11679                 if (do_extlpbk) {
11680                         tg3_phy_lpbk_set(tp, 0, true);
11681
11682                         /* All link indications report up, but the hardware
11683                          * isn't really ready for about 20 msec.  Double it
11684                          * to be sure.
11685                          */
11686                         mdelay(40);
11687
11688                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11689                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11690                         if (tg3_flag(tp, TSO_CAPABLE) &&
11691                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11692                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11693                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11694                             tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11695                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11696                 }
11697
11698                 /* Re-enable gphy autopowerdown. */
11699                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11700                         tg3_phy_toggle_apd(tp, true);
11701         }
11702
11703         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11704
11705 done:
11706         tp->phy_flags |= eee_cap;
11707
11708         return err;
11709 }
11710
11711 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11712                           u64 *data)
11713 {
11714         struct tg3 *tp = netdev_priv(dev);
11715         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11716
11717         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11718             tg3_power_up(tp)) {
11719                 etest->flags |= ETH_TEST_FL_FAILED;
11720                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); /* every byte nonzero: all tests marked failed */
11721                 return;
11722         }
11723
11724         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11725
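        /* ethtool result slots: data[0] nvram, data[1] link, data[2]
         * registers, data[3] memory, data[4..6] the three loopback
         * bitmasks, data[7] interrupt.  Nonzero means the test failed.
         */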
11726         if (tg3_test_nvram(tp) != 0) {
11727                 etest->flags |= ETH_TEST_FL_FAILED;
11728                 data[0] = 1;
11729         }
11730         if (!doextlpbk && tg3_test_link(tp)) {
11731                 etest->flags |= ETH_TEST_FL_FAILED;
11732                 data[1] = 1;
11733         }
11734         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11735                 int err, err2 = 0, irq_sync = 0;
11736
11737                 if (netif_running(dev)) {
11738                         tg3_phy_stop(tp);
11739                         tg3_netif_stop(tp);
11740                         irq_sync = 1;
11741                 }
11742
11743                 tg3_full_lock(tp, irq_sync);
11744
11745                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11746                 err = tg3_nvram_lock(tp);
11747                 tg3_halt_cpu(tp, RX_CPU_BASE);
11748                 if (!tg3_flag(tp, 5705_PLUS))
11749                         tg3_halt_cpu(tp, TX_CPU_BASE);
11750                 if (!err)
11751                         tg3_nvram_unlock(tp);
11752
11753                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11754                         tg3_phy_reset(tp);
11755
11756                 if (tg3_test_registers(tp) != 0) {
11757                         etest->flags |= ETH_TEST_FL_FAILED;
11758                         data[2] = 1;
11759                 }
11760
11761                 if (tg3_test_memory(tp) != 0) {
11762                         etest->flags |= ETH_TEST_FL_FAILED;
11763                         data[3] = 1;
11764                 }
11765
11766                 if (doextlpbk)
11767                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11768
11769                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11770                         etest->flags |= ETH_TEST_FL_FAILED;
11771
11772                 tg3_full_unlock(tp);
11773
11774                 if (tg3_test_interrupt(tp) != 0) {
11775                         etest->flags |= ETH_TEST_FL_FAILED;
11776                         data[7] = 1;
11777                 }
11778
11779                 tg3_full_lock(tp, 0);
11780
11781                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11782                 if (netif_running(dev)) {
11783                         tg3_flag_set(tp, INIT_COMPLETE);
11784                         err2 = tg3_restart_hw(tp, 1);
11785                         if (!err2)
11786                                 tg3_netif_start(tp);
11787                 }
11788
11789                 tg3_full_unlock(tp);
11790
11791                 if (irq_sync && !err2)
11792                         tg3_phy_start(tp);
11793         }
11794         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11795                 tg3_power_down(tp);
11797 }
11798
11799 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11800 {
11801         struct mii_ioctl_data *data = if_mii(ifr);
11802         struct tg3 *tp = netdev_priv(dev);
11803         int err;
11804
11805         if (tg3_flag(tp, USE_PHYLIB)) {
11806                 struct phy_device *phydev;
11807                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11808                         return -EAGAIN;
11809                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11810                 return phy_mii_ioctl(phydev, ifr, cmd);
11811         }
11812
11813         switch (cmd) {
11814         case SIOCGMIIPHY:
11815                 data->phy_id = tp->phy_addr;
11816
11817                 /* fallthru */
11818         case SIOCGMIIREG: {
11819                 u32 mii_regval;
11820
11821                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11822                         break;                  /* We have no PHY */
11823
11824                 if (!netif_running(dev))
11825                         return -EAGAIN;
11826
11827                 spin_lock_bh(&tp->lock);
11828                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11829                 spin_unlock_bh(&tp->lock);
11830
11831                 data->val_out = mii_regval;
11832
11833                 return err;
11834         }
11835
11836         case SIOCSMIIREG:
11837                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11838                         break;                  /* We have no PHY */
11839
11840                 if (!netif_running(dev))
11841                         return -EAGAIN;
11842
11843                 spin_lock_bh(&tp->lock);
11844                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11845                 spin_unlock_bh(&tp->lock);
11846
11847                 return err;
11848
11849         default:
11850                 /* do nothing */
11851                 break;
11852         }
11853         return -EOPNOTSUPP;
11854 }
11855
11856 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11857 {
11858         struct tg3 *tp = netdev_priv(dev);
11859
11860         memcpy(ec, &tp->coal, sizeof(*ec));
11861         return 0;
11862 }
11863
11864 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11865 {
11866         struct tg3 *tp = netdev_priv(dev);
11867         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11868         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11869
11870         if (!tg3_flag(tp, 5705_PLUS)) {
11871                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11872                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11873                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11874                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11875         }
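        /* On 5705_PLUS parts the four limits above stay zero, so the
         * checks below reject any nonzero *_irq or stats-block value
         * on those chips.
         */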
11876
11877         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11878             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11879             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11880             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11881             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11882             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11883             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11884             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11885             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11886             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11887                 return -EINVAL;
11888
11889         /* No rx interrupts will be generated if both are zero */
11890         if ((ec->rx_coalesce_usecs == 0) &&
11891             (ec->rx_max_coalesced_frames == 0))
11892                 return -EINVAL;
11893
11894         /* No tx interrupts will be generated if both are zero */
11895         if ((ec->tx_coalesce_usecs == 0) &&
11896             (ec->tx_max_coalesced_frames == 0))
11897                 return -EINVAL;
11898
11899         /* Only copy relevant parameters, ignore all others. */
11900         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11901         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11902         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11903         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11904         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11905         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11906         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11907         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11908         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11909
11910         if (netif_running(dev)) {
11911                 tg3_full_lock(tp, 0);
11912                 __tg3_set_coalesce(tp, &tp->coal);
11913                 tg3_full_unlock(tp);
11914         }
11915         return 0;
11916 }
11917
11918 static const struct ethtool_ops tg3_ethtool_ops = {
11919         .get_settings           = tg3_get_settings,
11920         .set_settings           = tg3_set_settings,
11921         .get_drvinfo            = tg3_get_drvinfo,
11922         .get_regs_len           = tg3_get_regs_len,
11923         .get_regs               = tg3_get_regs,
11924         .get_wol                = tg3_get_wol,
11925         .set_wol                = tg3_set_wol,
11926         .get_msglevel           = tg3_get_msglevel,
11927         .set_msglevel           = tg3_set_msglevel,
11928         .nway_reset             = tg3_nway_reset,
11929         .get_link               = ethtool_op_get_link,
11930         .get_eeprom_len         = tg3_get_eeprom_len,
11931         .get_eeprom             = tg3_get_eeprom,
11932         .set_eeprom             = tg3_set_eeprom,
11933         .get_ringparam          = tg3_get_ringparam,
11934         .set_ringparam          = tg3_set_ringparam,
11935         .get_pauseparam         = tg3_get_pauseparam,
11936         .set_pauseparam         = tg3_set_pauseparam,
11937         .self_test              = tg3_self_test,
11938         .get_strings            = tg3_get_strings,
11939         .set_phys_id            = tg3_set_phys_id,
11940         .get_ethtool_stats      = tg3_get_ethtool_stats,
11941         .get_coalesce           = tg3_get_coalesce,
11942         .set_coalesce           = tg3_set_coalesce,
11943         .get_sset_count         = tg3_get_sset_count,
11944 };
11945
11946 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11947 {
11948         u32 cursize, val, magic;
11949
11950         tp->nvram_size = EEPROM_CHIP_SIZE;
11951
11952         if (tg3_nvram_read(tp, 0, &magic) != 0)
11953                 return;
11954
11955         if ((magic != TG3_EEPROM_MAGIC) &&
11956             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11957             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11958                 return;
11959
11960         /*
11961          * Size the chip by reading offsets at increasing powers of two.
11962          * When we encounter our validation signature, we know the addressing
11963          * has wrapped around, and thus have our chip size.
11964          */
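        /* E.g. if the magic value read at offset 0 reappears at offset
         * 0x200, only 9 address bits are decoded and the usable size is
         * 512 bytes.
         */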
11965         cursize = 0x10;
11966
11967         while (cursize < tp->nvram_size) {
11968                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11969                         return;
11970
11971                 if (val == magic)
11972                         break;
11973
11974                 cursize <<= 1;
11975         }
11976
11977         tp->nvram_size = cursize;
11978 }
11979
11980 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11981 {
11982         u32 val;
11983
11984         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11985                 return;
11986
11987         /* Selfboot format */
11988         if (val != TG3_EEPROM_MAGIC) {
11989                 tg3_get_eeprom_size(tp);
11990                 return;
11991         }
11992
11993         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11994                 if (val != 0) {
11995                         /* This is confusing.  We want to operate on the
11996                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11997                          * call will read from NVRAM and byteswap the data
11998                          * according to the byteswapping settings for all
11999                          * other register accesses.  This ensures the data we
12000                          * want will always reside in the lower 16-bits.
12001                          * However, the data in NVRAM is in LE format, which
12002                          * means the data from the NVRAM read will always be
12003                          * opposite the endianness of the CPU.  The 16-bit
12004                          * byteswap then brings the data to CPU endianness.
12005                          */
12006                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12007                         return;
12008                 }
12009         }
12010         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12011 }
12012
12013 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12014 {
12015         u32 nvcfg1;
12016
12017         nvcfg1 = tr32(NVRAM_CFG1);
12018         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12019                 tg3_flag_set(tp, FLASH);
12020         } else {
12021                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12022                 tw32(NVRAM_CFG1, nvcfg1);
12023         }
12024
12025         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12026             tg3_flag(tp, 5780_CLASS)) {
12027                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12028                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12029                         tp->nvram_jedecnum = JEDEC_ATMEL;
12030                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12031                         tg3_flag_set(tp, NVRAM_BUFFERED);
12032                         break;
12033                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12034                         tp->nvram_jedecnum = JEDEC_ATMEL;
12035                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12036                         break;
12037                 case FLASH_VENDOR_ATMEL_EEPROM:
12038                         tp->nvram_jedecnum = JEDEC_ATMEL;
12039                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12040                         tg3_flag_set(tp, NVRAM_BUFFERED);
12041                         break;
12042                 case FLASH_VENDOR_ST:
12043                         tp->nvram_jedecnum = JEDEC_ST;
12044                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12045                         tg3_flag_set(tp, NVRAM_BUFFERED);
12046                         break;
12047                 case FLASH_VENDOR_SAIFUN:
12048                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12049                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12050                         break;
12051                 case FLASH_VENDOR_SST_SMALL:
12052                 case FLASH_VENDOR_SST_LARGE:
12053                         tp->nvram_jedecnum = JEDEC_SST;
12054                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12055                         break;
12056                 }
12057         } else {
12058                 tp->nvram_jedecnum = JEDEC_ATMEL;
12059                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12060                 tg3_flag_set(tp, NVRAM_BUFFERED);
12061         }
12062 }
12063
12064 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12065 {
12066         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12067         case FLASH_5752PAGE_SIZE_256:
12068                 tp->nvram_pagesize = 256;
12069                 break;
12070         case FLASH_5752PAGE_SIZE_512:
12071                 tp->nvram_pagesize = 512;
12072                 break;
12073         case FLASH_5752PAGE_SIZE_1K:
12074                 tp->nvram_pagesize = 1024;
12075                 break;
12076         case FLASH_5752PAGE_SIZE_2K:
12077                 tp->nvram_pagesize = 2048;
12078                 break;
12079         case FLASH_5752PAGE_SIZE_4K:
12080                 tp->nvram_pagesize = 4096;
12081                 break;
12082         case FLASH_5752PAGE_SIZE_264:
12083                 tp->nvram_pagesize = 264;
12084                 break;
12085         case FLASH_5752PAGE_SIZE_528:
12086                 tp->nvram_pagesize = 528;
12087                 break;
12088         }
12089 }
12090
12091 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12092 {
12093         u32 nvcfg1;
12094
12095         nvcfg1 = tr32(NVRAM_CFG1);
12096
12097         /* NVRAM protection for TPM */
12098         if (nvcfg1 & (1 << 27))
12099                 tg3_flag_set(tp, PROTECTED_NVRAM);
12100
12101         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12102         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12103         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12104                 tp->nvram_jedecnum = JEDEC_ATMEL;
12105                 tg3_flag_set(tp, NVRAM_BUFFERED);
12106                 break;
12107         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12108                 tp->nvram_jedecnum = JEDEC_ATMEL;
12109                 tg3_flag_set(tp, NVRAM_BUFFERED);
12110                 tg3_flag_set(tp, FLASH);
12111                 break;
12112         case FLASH_5752VENDOR_ST_M45PE10:
12113         case FLASH_5752VENDOR_ST_M45PE20:
12114         case FLASH_5752VENDOR_ST_M45PE40:
12115                 tp->nvram_jedecnum = JEDEC_ST;
12116                 tg3_flag_set(tp, NVRAM_BUFFERED);
12117                 tg3_flag_set(tp, FLASH);
12118                 break;
12119         }
12120
12121         if (tg3_flag(tp, FLASH)) {
12122                 tg3_nvram_get_pagesize(tp, nvcfg1);
12123         } else {
12124                 /* For EEPROMs, set the pagesize to the maximum EEPROM size */
12125                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12126
12127                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12128                 tw32(NVRAM_CFG1, nvcfg1);
12129         }
12130 }
12131
12132 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12133 {
12134         u32 nvcfg1, protect = 0;
12135
12136         nvcfg1 = tr32(NVRAM_CFG1);
12137
12138         /* NVRAM protection for TPM */
12139         if (nvcfg1 & (1 << 27)) {
12140                 tg3_flag_set(tp, PROTECTED_NVRAM);
12141                 protect = 1;
12142         }
12143
12144         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12145         switch (nvcfg1) {
12146         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12147         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12148         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12149         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12150                 tp->nvram_jedecnum = JEDEC_ATMEL;
12151                 tg3_flag_set(tp, NVRAM_BUFFERED);
12152                 tg3_flag_set(tp, FLASH);
12153                 tp->nvram_pagesize = 264;
12154                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12155                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12156                         tp->nvram_size = (protect ? 0x3e200 :
12157                                           TG3_NVRAM_SIZE_512KB);
12158                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12159                         tp->nvram_size = (protect ? 0x1f200 :
12160                                           TG3_NVRAM_SIZE_256KB);
12161                 else
12162                         tp->nvram_size = (protect ? 0x1f200 :
12163                                           TG3_NVRAM_SIZE_128KB);
12164                 break;
12165         case FLASH_5752VENDOR_ST_M45PE10:
12166         case FLASH_5752VENDOR_ST_M45PE20:
12167         case FLASH_5752VENDOR_ST_M45PE40:
12168                 tp->nvram_jedecnum = JEDEC_ST;
12169                 tg3_flag_set(tp, NVRAM_BUFFERED);
12170                 tg3_flag_set(tp, FLASH);
12171                 tp->nvram_pagesize = 256;
12172                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12173                         tp->nvram_size = (protect ?
12174                                           TG3_NVRAM_SIZE_64KB :
12175                                           TG3_NVRAM_SIZE_128KB);
12176                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12177                         tp->nvram_size = (protect ?
12178                                           TG3_NVRAM_SIZE_64KB :
12179                                           TG3_NVRAM_SIZE_256KB);
12180                 else
12181                         tp->nvram_size = (protect ?
12182                                           TG3_NVRAM_SIZE_128KB :
12183                                           TG3_NVRAM_SIZE_512KB);
12184                 break;
12185         }
12186 }
12187
12188 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12189 {
12190         u32 nvcfg1;
12191
12192         nvcfg1 = tr32(NVRAM_CFG1);
12193
12194         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12195         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12196         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12197         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12198         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12199                 tp->nvram_jedecnum = JEDEC_ATMEL;
12200                 tg3_flag_set(tp, NVRAM_BUFFERED);
12201                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12202
12203                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12204                 tw32(NVRAM_CFG1, nvcfg1);
12205                 break;
12206         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12207         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12208         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12209         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12210                 tp->nvram_jedecnum = JEDEC_ATMEL;
12211                 tg3_flag_set(tp, NVRAM_BUFFERED);
12212                 tg3_flag_set(tp, FLASH);
12213                 tp->nvram_pagesize = 264;
12214                 break;
12215         case FLASH_5752VENDOR_ST_M45PE10:
12216         case FLASH_5752VENDOR_ST_M45PE20:
12217         case FLASH_5752VENDOR_ST_M45PE40:
12218                 tp->nvram_jedecnum = JEDEC_ST;
12219                 tg3_flag_set(tp, NVRAM_BUFFERED);
12220                 tg3_flag_set(tp, FLASH);
12221                 tp->nvram_pagesize = 256;
12222                 break;
12223         }
12224 }
12225
12226 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12227 {
12228         u32 nvcfg1, protect = 0;
12229
12230         nvcfg1 = tr32(NVRAM_CFG1);
12231
12232         /* NVRAM protection for TPM */
12233         if (nvcfg1 & (1 << 27)) {
12234                 tg3_flag_set(tp, PROTECTED_NVRAM);
12235                 protect = 1;
12236         }
12237
12238         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12239         switch (nvcfg1) {
12240         case FLASH_5761VENDOR_ATMEL_ADB021D:
12241         case FLASH_5761VENDOR_ATMEL_ADB041D:
12242         case FLASH_5761VENDOR_ATMEL_ADB081D:
12243         case FLASH_5761VENDOR_ATMEL_ADB161D:
12244         case FLASH_5761VENDOR_ATMEL_MDB021D:
12245         case FLASH_5761VENDOR_ATMEL_MDB041D:
12246         case FLASH_5761VENDOR_ATMEL_MDB081D:
12247         case FLASH_5761VENDOR_ATMEL_MDB161D:
12248                 tp->nvram_jedecnum = JEDEC_ATMEL;
12249                 tg3_flag_set(tp, NVRAM_BUFFERED);
12250                 tg3_flag_set(tp, FLASH);
12251                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12252                 tp->nvram_pagesize = 256;
12253                 break;
12254         case FLASH_5761VENDOR_ST_A_M45PE20:
12255         case FLASH_5761VENDOR_ST_A_M45PE40:
12256         case FLASH_5761VENDOR_ST_A_M45PE80:
12257         case FLASH_5761VENDOR_ST_A_M45PE16:
12258         case FLASH_5761VENDOR_ST_M_M45PE20:
12259         case FLASH_5761VENDOR_ST_M_M45PE40:
12260         case FLASH_5761VENDOR_ST_M_M45PE80:
12261         case FLASH_5761VENDOR_ST_M_M45PE16:
12262                 tp->nvram_jedecnum = JEDEC_ST;
12263                 tg3_flag_set(tp, NVRAM_BUFFERED);
12264                 tg3_flag_set(tp, FLASH);
12265                 tp->nvram_pagesize = 256;
12266                 break;
12267         }
12268
12269         if (protect) {
12270                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12271         } else {
12272                 switch (nvcfg1) {
12273                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12274                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12275                 case FLASH_5761VENDOR_ST_A_M45PE16:
12276                 case FLASH_5761VENDOR_ST_M_M45PE16:
12277                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12278                         break;
12279                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12280                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12281                 case FLASH_5761VENDOR_ST_A_M45PE80:
12282                 case FLASH_5761VENDOR_ST_M_M45PE80:
12283                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12284                         break;
12285                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12286                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12287                 case FLASH_5761VENDOR_ST_A_M45PE40:
12288                 case FLASH_5761VENDOR_ST_M_M45PE40:
12289                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12290                         break;
12291                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12292                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12293                 case FLASH_5761VENDOR_ST_A_M45PE20:
12294                 case FLASH_5761VENDOR_ST_M_M45PE20:
12295                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12296                         break;
12297                 }
12298         }
12299 }
12300
12301 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12302 {
12303         tp->nvram_jedecnum = JEDEC_ATMEL;
12304         tg3_flag_set(tp, NVRAM_BUFFERED);
12305         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12306 }
12307
12308 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12309 {
12310         u32 nvcfg1;
12311
12312         nvcfg1 = tr32(NVRAM_CFG1);
12313
12314         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12315         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12316         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12317                 tp->nvram_jedecnum = JEDEC_ATMEL;
12318                 tg3_flag_set(tp, NVRAM_BUFFERED);
12319                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12320
12321                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12322                 tw32(NVRAM_CFG1, nvcfg1);
12323                 return;
12324         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12325         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12326         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12327         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12328         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12329         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12330         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12331                 tp->nvram_jedecnum = JEDEC_ATMEL;
12332                 tg3_flag_set(tp, NVRAM_BUFFERED);
12333                 tg3_flag_set(tp, FLASH);
12334
12335                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12336                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12337                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12338                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12339                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12340                         break;
12341                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12342                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12343                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12344                         break;
12345                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12346                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12347                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12348                         break;
12349                 }
12350                 break;
12351         case FLASH_5752VENDOR_ST_M45PE10:
12352         case FLASH_5752VENDOR_ST_M45PE20:
12353         case FLASH_5752VENDOR_ST_M45PE40:
12354                 tp->nvram_jedecnum = JEDEC_ST;
12355                 tg3_flag_set(tp, NVRAM_BUFFERED);
12356                 tg3_flag_set(tp, FLASH);
12357
12358                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12359                 case FLASH_5752VENDOR_ST_M45PE10:
12360                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12361                         break;
12362                 case FLASH_5752VENDOR_ST_M45PE20:
12363                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12364                         break;
12365                 case FLASH_5752VENDOR_ST_M45PE40:
12366                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12367                         break;
12368                 }
12369                 break;
12370         default:
12371                 tg3_flag_set(tp, NO_NVRAM);
12372                 return;
12373         }
12374
12375         tg3_nvram_get_pagesize(tp, nvcfg1);
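        /* 264- and 528-byte pages are the Atmel AT45DB DataFlash sizes,
         * which need page-address translation; all other sizes are
         * addressed linearly, hence NO_NVRAM_ADDR_TRANS below.
         */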
12376         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12377                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12378 }
12379
12381 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12382 {
12383         u32 nvcfg1;
12384
12385         nvcfg1 = tr32(NVRAM_CFG1);
12386
12387         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12388         case FLASH_5717VENDOR_ATMEL_EEPROM:
12389         case FLASH_5717VENDOR_MICRO_EEPROM:
12390                 tp->nvram_jedecnum = JEDEC_ATMEL;
12391                 tg3_flag_set(tp, NVRAM_BUFFERED);
12392                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12393
12394                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12395                 tw32(NVRAM_CFG1, nvcfg1);
12396                 return;
12397         case FLASH_5717VENDOR_ATMEL_MDB011D:
12398         case FLASH_5717VENDOR_ATMEL_ADB011B:
12399         case FLASH_5717VENDOR_ATMEL_ADB011D:
12400         case FLASH_5717VENDOR_ATMEL_MDB021D:
12401         case FLASH_5717VENDOR_ATMEL_ADB021B:
12402         case FLASH_5717VENDOR_ATMEL_ADB021D:
12403         case FLASH_5717VENDOR_ATMEL_45USPT:
12404                 tp->nvram_jedecnum = JEDEC_ATMEL;
12405                 tg3_flag_set(tp, NVRAM_BUFFERED);
12406                 tg3_flag_set(tp, FLASH);
12407
12408                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12409                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12410                         /* Detect size with tg3_get_nvram_size() */
12411                         break;
12412                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12413                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12414                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12415                         break;
12416                 default:
12417                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12418                         break;
12419                 }
12420                 break;
12421         case FLASH_5717VENDOR_ST_M_M25PE10:
12422         case FLASH_5717VENDOR_ST_A_M25PE10:
12423         case FLASH_5717VENDOR_ST_M_M45PE10:
12424         case FLASH_5717VENDOR_ST_A_M45PE10:
12425         case FLASH_5717VENDOR_ST_M_M25PE20:
12426         case FLASH_5717VENDOR_ST_A_M25PE20:
12427         case FLASH_5717VENDOR_ST_M_M45PE20:
12428         case FLASH_5717VENDOR_ST_A_M45PE20:
12429         case FLASH_5717VENDOR_ST_25USPT:
12430         case FLASH_5717VENDOR_ST_45USPT:
12431                 tp->nvram_jedecnum = JEDEC_ST;
12432                 tg3_flag_set(tp, NVRAM_BUFFERED);
12433                 tg3_flag_set(tp, FLASH);
12434
12435                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12436                 case FLASH_5717VENDOR_ST_M_M25PE20:
12437                 case FLASH_5717VENDOR_ST_M_M45PE20:
12438                         /* Detect size with tg3_get_nvram_size() */
12439                         break;
12440                 case FLASH_5717VENDOR_ST_A_M25PE20:
12441                 case FLASH_5717VENDOR_ST_A_M45PE20:
12442                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12443                         break;
12444                 default:
12445                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12446                         break;
12447                 }
12448                 break;
12449         default:
12450                 tg3_flag_set(tp, NO_NVRAM);
12451                 return;
12452         }
12453
12454         tg3_nvram_get_pagesize(tp, nvcfg1);
12455         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12456                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12457 }
12458
12459 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12460 {
12461         u32 nvcfg1, nvmpinstrp;
12462
12463         nvcfg1 = tr32(NVRAM_CFG1);
12464         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12465
12466         switch (nvmpinstrp) {
12467         case FLASH_5720_EEPROM_HD:
12468         case FLASH_5720_EEPROM_LD:
12469                 tp->nvram_jedecnum = JEDEC_ATMEL;
12470                 tg3_flag_set(tp, NVRAM_BUFFERED);
12471
12472                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12473                 tw32(NVRAM_CFG1, nvcfg1);
12474                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12475                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12476                 else
12477                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12478                 return;
12479         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12480         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12481         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12482         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12483         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12484         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12485         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12486         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12487         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12488         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12489         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12490         case FLASH_5720VENDOR_ATMEL_45USPT:
12491                 tp->nvram_jedecnum = JEDEC_ATMEL;
12492                 tg3_flag_set(tp, NVRAM_BUFFERED);
12493                 tg3_flag_set(tp, FLASH);
12494
12495                 switch (nvmpinstrp) {
12496                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12497                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12498                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12499                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12500                         break;
12501                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12502                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12503                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12504                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12505                         break;
12506                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12507                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12508                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12509                         break;
12510                 default:
12511                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12512                         break;
12513                 }
12514                 break;
12515         case FLASH_5720VENDOR_M_ST_M25PE10:
12516         case FLASH_5720VENDOR_M_ST_M45PE10:
12517         case FLASH_5720VENDOR_A_ST_M25PE10:
12518         case FLASH_5720VENDOR_A_ST_M45PE10:
12519         case FLASH_5720VENDOR_M_ST_M25PE20:
12520         case FLASH_5720VENDOR_M_ST_M45PE20:
12521         case FLASH_5720VENDOR_A_ST_M25PE20:
12522         case FLASH_5720VENDOR_A_ST_M45PE20:
12523         case FLASH_5720VENDOR_M_ST_M25PE40:
12524         case FLASH_5720VENDOR_M_ST_M45PE40:
12525         case FLASH_5720VENDOR_A_ST_M25PE40:
12526         case FLASH_5720VENDOR_A_ST_M45PE40:
12527         case FLASH_5720VENDOR_M_ST_M25PE80:
12528         case FLASH_5720VENDOR_M_ST_M45PE80:
12529         case FLASH_5720VENDOR_A_ST_M25PE80:
12530         case FLASH_5720VENDOR_A_ST_M45PE80:
12531         case FLASH_5720VENDOR_ST_25USPT:
12532         case FLASH_5720VENDOR_ST_45USPT:
12533                 tp->nvram_jedecnum = JEDEC_ST;
12534                 tg3_flag_set(tp, NVRAM_BUFFERED);
12535                 tg3_flag_set(tp, FLASH);
12536
12537                 switch (nvmpinstrp) {
12538                 case FLASH_5720VENDOR_M_ST_M25PE20:
12539                 case FLASH_5720VENDOR_M_ST_M45PE20:
12540                 case FLASH_5720VENDOR_A_ST_M25PE20:
12541                 case FLASH_5720VENDOR_A_ST_M45PE20:
12542                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12543                         break;
12544                 case FLASH_5720VENDOR_M_ST_M25PE40:
12545                 case FLASH_5720VENDOR_M_ST_M45PE40:
12546                 case FLASH_5720VENDOR_A_ST_M25PE40:
12547                 case FLASH_5720VENDOR_A_ST_M45PE40:
12548                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12549                         break;
12550                 case FLASH_5720VENDOR_M_ST_M25PE80:
12551                 case FLASH_5720VENDOR_M_ST_M45PE80:
12552                 case FLASH_5720VENDOR_A_ST_M25PE80:
12553                 case FLASH_5720VENDOR_A_ST_M45PE80:
12554                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12555                         break;
12556                 default:
12557                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12558                         break;
12559                 }
12560                 break;
12561         default:
12562                 tg3_flag_set(tp, NO_NVRAM);
12563                 return;
12564         }
12565
12566         tg3_nvram_get_pagesize(tp, nvcfg1);
12567         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12568                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12569 }
12570
12571 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12572 static void __devinit tg3_nvram_init(struct tg3 *tp)
12573 {
12574         tw32_f(GRC_EEPROM_ADDR,
12575              (EEPROM_ADDR_FSM_RESET |
12576               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12577                EEPROM_ADDR_CLKPERD_SHIFT)));
12578
12579         msleep(1);
12580
12581         /* Enable seeprom accesses. */
12582         tw32_f(GRC_LOCAL_CTRL,
12583              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12584         udelay(100);
12585
12586         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12587             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12588                 tg3_flag_set(tp, NVRAM);
12589
12590                 if (tg3_nvram_lock(tp)) {
12591                         netdev_warn(tp->dev,
12592                                     "Cannot get nvram lock, %s failed\n",
12593                                     __func__);
12594                         return;
12595                 }
12596                 tg3_enable_nvram_access(tp);
12597
12598                 tp->nvram_size = 0;
12599
12600                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12601                         tg3_get_5752_nvram_info(tp);
12602                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12603                         tg3_get_5755_nvram_info(tp);
12604                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12605                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12606                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12607                         tg3_get_5787_nvram_info(tp);
12608                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12609                         tg3_get_5761_nvram_info(tp);
12610                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12611                         tg3_get_5906_nvram_info(tp);
12612                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12613                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12614                         tg3_get_57780_nvram_info(tp);
12615                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12616                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12617                         tg3_get_5717_nvram_info(tp);
12618                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12619                         tg3_get_5720_nvram_info(tp);
12620                 else
12621                         tg3_get_nvram_info(tp);
12622
12623                 if (tp->nvram_size == 0)
12624                         tg3_get_nvram_size(tp);
12625
12626                 tg3_disable_nvram_access(tp);
12627                 tg3_nvram_unlock(tp);
12628
12629         } else {
12630                 tg3_flag_clear(tp, NVRAM);
12631                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12632
12633                 tg3_get_eeprom_size(tp);
12634         }
12635 }
12636
12637 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12638                                     u32 offset, u32 len, u8 *buf)
12639 {
12640         int i, j, rc = 0;
12641         u32 val;
12642
12643         for (i = 0; i < len; i += 4) {
12644                 u32 addr;
12645                 __be32 data;
12646
12647                 addr = offset + i;
12648
12649                 memcpy(&data, buf + i, 4);
12650
12651                 /*
12652                  * The SEEPROM interface expects the data to always be opposite
12653                  * the native endian format.  We accomplish this by reversing
12654                  * all the operations that would have been performed on the
12655                  * data from a call to tg3_nvram_read_be32().
12656                  */
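                /* For illustration only (values assumed): if buf holds the
                 * bytes 0x11 0x22 0x33 0x44, data reads back as CPU value
                 * 0x11223344 and swab32() yields 0x44332211, on either host
                 * endianness.
                 */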
12657                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12658
12659                 val = tr32(GRC_EEPROM_ADDR);
12660                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12661
12662                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12663                         EEPROM_ADDR_READ);
12664                 tw32(GRC_EEPROM_ADDR, val |
12665                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12666                         (addr & EEPROM_ADDR_ADDR_MASK) |
12667                         EEPROM_ADDR_START |
12668                         EEPROM_ADDR_WRITE);
12669
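                /* Poll for up to ~1 s (1000 x 1 ms) for write completion. */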
12670                 for (j = 0; j < 1000; j++) {
12671                         val = tr32(GRC_EEPROM_ADDR);
12672
12673                         if (val & EEPROM_ADDR_COMPLETE)
12674                                 break;
12675                         msleep(1);
12676                 }
12677                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12678                         rc = -EBUSY;
12679                         break;
12680                 }
12681         }
12682
12683         return rc;
12684 }
12685
12686 /* offset and length are dword aligned */
12687 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12688                 u8 *buf)
12689 {
12690         int ret = 0;
12691         u32 pagesize = tp->nvram_pagesize;
12692         u32 pagemask = pagesize - 1;
12693         u32 nvram_cmd;
12694         u8 *tmp;
12695
12696         tmp = kmalloc(pagesize, GFP_KERNEL);
12697         if (tmp == NULL)
12698                 return -ENOMEM;
12699
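        /* Each pass below is a read-modify-write of one whole flash page:
         * read the page into tmp, overlay the caller's bytes, erase the
         * page, then program it back one word at a time.
         */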
12700         while (len) {
12701                 int j;
12702                 u32 phy_addr, page_off, size;
12703
12704                 phy_addr = offset & ~pagemask;
12705
12706                 for (j = 0; j < pagesize; j += 4) {
12707                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12708                                                   (__be32 *) (tmp + j));
12709                         if (ret)
12710                                 break;
12711                 }
12712                 if (ret)
12713                         break;
12714
12715                 page_off = offset & pagemask;
12716                 size = pagesize;
12717                 if (len < size)
12718                         size = len;
12719
12720                 len -= size;
12721
12722                 memcpy(tmp + page_off, buf, size);
12723
12724                 offset = offset + (pagesize - page_off);
12725
12726                 tg3_enable_nvram_access(tp);
12727
12728                 /*
12729                  * Before we can erase the flash page, we need
12730                  * to issue a special "write enable" command.
12731                  */
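                /* The sequence below follows the usual serial-flash protocol
                 * (assumed part behavior): WREN, page ERASE, WREN again,
                 * then page PROGRAM.
                 */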
12732                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12733
12734                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12735                         break;
12736
12737                 /* Erase the target page */
12738                 tw32(NVRAM_ADDR, phy_addr);
12739
12740                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12741                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12742
12743                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12744                         break;
12745
12746                 /* Issue another write enable to start the write. */
12747                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12748
12749                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12750                         break;
12751
12752                 for (j = 0; j < pagesize; j += 4) {
12753                         __be32 data;
12754
12755                         data = *((__be32 *) (tmp + j));
12756
12757                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12758
12759                         tw32(NVRAM_ADDR, phy_addr + j);
12760
12761                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12762                                 NVRAM_CMD_WR;
12763
12764                         if (j == 0)
12765                                 nvram_cmd |= NVRAM_CMD_FIRST;
12766                         else if (j == (pagesize - 4))
12767                                 nvram_cmd |= NVRAM_CMD_LAST;
12768
12769                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12770                                 break;
12771                 }
12772                 if (ret)
12773                         break;
12774         }
12775
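        /* All done: drop the part's write-enable latch (WRDI). */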
12776         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12777         tg3_nvram_exec_cmd(tp, nvram_cmd);
12778
12779         kfree(tmp);
12780
12781         return ret;
12782 }
12783
12784 /* offset and length are dword aligned */
12785 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12786                 u8 *buf)
12787 {
12788         int i, ret = 0;
12789
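        /* NVRAM_CMD_FIRST/LAST bracket each burst: FIRST at a page start
         * (or the very first word), LAST at a page end or at the end of
         * the buffer, so short tail writes still terminate cleanly.
         */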
12790         for (i = 0; i < len; i += 4, offset += 4) {
12791                 u32 page_off, phy_addr, nvram_cmd;
12792                 __be32 data;
12793
12794                 memcpy(&data, buf + i, 4);
12795                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12796
12797                 page_off = offset % tp->nvram_pagesize;
12798
12799                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12800
12801                 tw32(NVRAM_ADDR, phy_addr);
12802
12803                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12804
12805                 if (page_off == 0 || i == 0)
12806                         nvram_cmd |= NVRAM_CMD_FIRST;
12807                 if (page_off == (tp->nvram_pagesize - 4))
12808                         nvram_cmd |= NVRAM_CMD_LAST;
12809
12810                 if (i == (len - 4))
12811                         nvram_cmd |= NVRAM_CMD_LAST;
12812
12813                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12814                     !tg3_flag(tp, 5755_PLUS) &&
12815                     (tp->nvram_jedecnum == JEDEC_ST) &&
12816                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12817
12818                         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
12819                                                      NVRAM_CMD_GO |
12820                                                      NVRAM_CMD_DONE);
12821                         if (ret)
12822                                 break;
12823                 }
12824                 if (!tg3_flag(tp, FLASH)) {
12825                         /* We always do complete word writes to eeprom. */
12826                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12827                 }
12828
12829                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12830                         break;
12831         }
12832         return ret;
12833 }
12834
12835 /* offset and length are dword aligned */
12836 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12837 {
12838         int ret;
12839
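        /* If the part is write-protected via GPIO1 (as the EEPROM_WRITE_PROT
         * flag suggests), de-assert the protect line around the write;
         * tp->grc_local_ctrl restores it below.
         */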
12840         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12841                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12842                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12843                 udelay(40);
12844         }
12845
12846         if (!tg3_flag(tp, NVRAM)) {
12847                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12848         } else {
12849                 u32 grc_mode;
12850
12851                 ret = tg3_nvram_lock(tp);
12852                 if (ret)
12853                         return ret;
12854
12855                 tg3_enable_nvram_access(tp);
12856                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12857                         tw32(NVRAM_WRITE1, 0x406);
12858
12859                 grc_mode = tr32(GRC_MODE);
12860                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12861
12862                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12863                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12864                                 buf);
12865                 } else {
12866                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12867                                 buf);
12868                 }
12869
12870                 grc_mode = tr32(GRC_MODE);
12871                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12872
12873                 tg3_disable_nvram_access(tp);
12874                 tg3_nvram_unlock(tp);
12875         }
12876
12877         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12878                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12879                 udelay(40);
12880         }
12881
12882         return ret;
12883 }
12884
12885 struct subsys_tbl_ent {
12886         u16 subsys_vendor, subsys_devid;
12887         u32 phy_id;
12888 };
12889
12890 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12891         /* Broadcom boards. */
12892         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12893           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12894         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12895           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12896         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12897           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12898         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12899           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12900         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12901           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12902         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12903           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12904         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12905           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12906         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12907           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12908         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12909           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12910         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12911           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12912         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12913           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12914
12915         /* 3com boards. */
12916         { TG3PCI_SUBVENDOR_ID_3COM,
12917           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12918         { TG3PCI_SUBVENDOR_ID_3COM,
12919           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12920         { TG3PCI_SUBVENDOR_ID_3COM,
12921           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12922         { TG3PCI_SUBVENDOR_ID_3COM,
12923           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12924         { TG3PCI_SUBVENDOR_ID_3COM,
12925           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12926
12927         /* DELL boards. */
12928         { TG3PCI_SUBVENDOR_ID_DELL,
12929           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12930         { TG3PCI_SUBVENDOR_ID_DELL,
12931           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12932         { TG3PCI_SUBVENDOR_ID_DELL,
12933           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12934         { TG3PCI_SUBVENDOR_ID_DELL,
12935           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12936
12937         /* Compaq boards. */
12938         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12939           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12940         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12941           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12942         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12943           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12944         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12945           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12946         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12947           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12948
12949         /* IBM boards. */
12950         { TG3PCI_SUBVENDOR_ID_IBM,
12951           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12952 };
12953
12954 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12955 {
12956         int i;
12957
12958         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12959                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12960                      tp->pdev->subsystem_vendor) &&
12961                     (subsys_id_to_phy_id[i].subsys_devid ==
12962                      tp->pdev->subsystem_device))
12963                         return &subsys_id_to_phy_id[i];
12964         }
12965         return NULL;
12966 }
12967
12968 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12969 {
12970         u32 val;
12971
12972         tp->phy_id = TG3_PHY_ID_INVALID;
12973         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12974
12975         /* Assume an onboard, WOL-capable device by default.  */
12976         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12977         tg3_flag_set(tp, WOL_CAP);
12978
12979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12980                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12981                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12982                         tg3_flag_set(tp, IS_NIC);
12983                 }
12984                 val = tr32(VCPU_CFGSHDW);
12985                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12986                         tg3_flag_set(tp, ASPM_WORKAROUND);
12987                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12988                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12989                         tg3_flag_set(tp, WOL_ENABLE);
12990                         device_set_wakeup_enable(&tp->pdev->dev, true);
12991                 }
12992                 goto done;
12993         }
12994
12995         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12996         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12997                 u32 nic_cfg, led_cfg;
12998                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12999                 int eeprom_phy_serdes = 0;
13000
13001                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13002                 tp->nic_sram_data_cfg = nic_cfg;
13003
13004                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13005                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13006                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13007                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13008                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13009                     (ver > 0) && (ver < 0x100))
13010                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13011
13012                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13013                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13014
13015                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13016                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13017                         eeprom_phy_serdes = 1;
13018
13019                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13020                 if (nic_phy_id != 0) {
13021                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13022                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13023
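                        /* Repack into the layout tg3_phy_probe() builds from
                         * MII_PHYSID1/MII_PHYSID2 so the two paths agree.
                         */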
13024                         eeprom_phy_id  = (id1 >> 16) << 10;
13025                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13026                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13027                 } else
13028                         eeprom_phy_id = 0;
13029
13030                 tp->phy_id = eeprom_phy_id;
13031                 if (eeprom_phy_serdes) {
13032                         if (!tg3_flag(tp, 5705_PLUS))
13033                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13034                         else
13035                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13036                 }
13037
13038                 if (tg3_flag(tp, 5750_PLUS))
13039                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13040                                     SHASTA_EXT_LED_MODE_MASK);
13041                 else
13042                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13043
13044                 switch (led_cfg) {
13045                 default:
13046                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13047                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13048                         break;
13049
13050                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13051                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13052                         break;
13053
13054                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13055                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13056
13057                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13058                          * read; some older 5700/5701 bootcode leaves it 0.
13059                          */
13060                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13061                             ASIC_REV_5700 ||
13062                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13063                             ASIC_REV_5701)
13064                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13065
13066                         break;
13067
13068                 case SHASTA_EXT_LED_SHARED:
13069                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13070                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13071                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13072                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13073                                                  LED_CTRL_MODE_PHY_2);
13074                         break;
13075
13076                 case SHASTA_EXT_LED_MAC:
13077                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13078                         break;
13079
13080                 case SHASTA_EXT_LED_COMBO:
13081                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13082                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13083                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13084                                                  LED_CTRL_MODE_PHY_2);
13085                         break;
13086
13087                 }
13088
13089                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13090                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13091                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13092                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13093
13094                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13095                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13096
13097                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13098                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13099                         if ((tp->pdev->subsystem_vendor ==
13100                              PCI_VENDOR_ID_ARIMA) &&
13101                             (tp->pdev->subsystem_device == 0x205a ||
13102                              tp->pdev->subsystem_device == 0x2063))
13103                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13104                 } else {
13105                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13106                         tg3_flag_set(tp, IS_NIC);
13107                 }
13108
13109                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13110                         tg3_flag_set(tp, ENABLE_ASF);
13111                         if (tg3_flag(tp, 5750_PLUS))
13112                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13113                 }
13114
13115                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13116                     tg3_flag(tp, 5750_PLUS))
13117                         tg3_flag_set(tp, ENABLE_APE);
13118
13119                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13120                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13121                         tg3_flag_clear(tp, WOL_CAP);
13122
13123                 if (tg3_flag(tp, WOL_CAP) &&
13124                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13125                         tg3_flag_set(tp, WOL_ENABLE);
13126                         device_set_wakeup_enable(&tp->pdev->dev, true);
13127                 }
13128
13129                 if (cfg2 & (1 << 17))
13130                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13131
13132                 /* SerDes signal pre-emphasis in register 0x590 is set
13133                  * by the bootcode if bit 18 is set. */
13134                 if (cfg2 & (1 << 18))
13135                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13136
13137                 if ((tg3_flag(tp, 57765_PLUS) ||
13138                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13139                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13140                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13141                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13142
13143                 if (tg3_flag(tp, PCI_EXPRESS) &&
13144                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13145                     !tg3_flag(tp, 57765_PLUS)) {
13146                         u32 cfg3;
13147
13148                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13149                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13150                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13151                 }
13152
13153                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13154                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13155                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13156                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13157                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13158                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13159         }
13160 done:
13161         if (tg3_flag(tp, WOL_CAP))
13162                 device_set_wakeup_enable(&tp->pdev->dev,
13163                                          tg3_flag(tp, WOL_ENABLE));
13164         else
13165                 device_set_wakeup_capable(&tp->pdev->dev, false);
13166 }
13167
13168 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13169 {
13170         int i;
13171         u32 val;
13172
13173         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13174         tw32(OTP_CTRL, cmd);
13175
13176         /* Wait for up to 1 ms for command to execute. */
13177         for (i = 0; i < 100; i++) {
13178                 val = tr32(OTP_STATUS);
13179                 if (val & OTP_STATUS_CMD_DONE)
13180                         break;
13181                 udelay(10);
13182         }
13183
13184         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13185 }
13186
13187 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13188  * configuration is a 32-bit value that straddles the alignment boundary.
13189  * We do two 32-bit reads and then shift and merge the results.
13190  */
13191 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13192 {
13193         u32 bhalf_otp, thalf_otp;
13194
13195         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13196
13197         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13198                 return 0;
13199
13200         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13201
13202         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13203                 return 0;
13204
13205         thalf_otp = tr32(OTP_READ_DATA);
13206
13207         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13208
13209         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13210                 return 0;
13211
13212         bhalf_otp = tr32(OTP_READ_DATA);
13213
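        /* Keep the low 16 bits of the first word as the high half and the
         * high 16 bits of the second word as the low half.
         */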
13214         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13215 }
13216
13217 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13218 {
13219         u32 adv = ADVERTISED_Autoneg |
13220                   ADVERTISED_Pause;
13221
13222         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13223                 adv |= ADVERTISED_1000baseT_Half |
13224                        ADVERTISED_1000baseT_Full;
13225
13226         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13227                 adv |= ADVERTISED_100baseT_Half |
13228                        ADVERTISED_100baseT_Full |
13229                        ADVERTISED_10baseT_Half |
13230                        ADVERTISED_10baseT_Full |
13231                        ADVERTISED_TP;
13232         else
13233                 adv |= ADVERTISED_FIBRE;
13234
13235         tp->link_config.advertising = adv;
13236         tp->link_config.speed = SPEED_INVALID;
13237         tp->link_config.duplex = DUPLEX_INVALID;
13238         tp->link_config.autoneg = AUTONEG_ENABLE;
13239         tp->link_config.active_speed = SPEED_INVALID;
13240         tp->link_config.active_duplex = DUPLEX_INVALID;
13241         tp->link_config.orig_speed = SPEED_INVALID;
13242         tp->link_config.orig_duplex = DUPLEX_INVALID;
13243         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13244 }
13245
13246 static int __devinit tg3_phy_probe(struct tg3 *tp)
13247 {
13248         u32 hw_phy_id_1, hw_phy_id_2;
13249         u32 hw_phy_id, hw_phy_id_masked;
13250         int err;
13251
13252         /* flow control autonegotiation is default behavior */
13253         tg3_flag_set(tp, PAUSE_AUTONEG);
13254         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13255
13256         if (tg3_flag(tp, USE_PHYLIB))
13257                 return tg3_phy_init(tp);
13258
13259         /* Reading the PHY ID register can conflict with ASF
13260          * firmware access to the PHY hardware.
13261          */
13262         err = 0;
13263         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13264                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13265         } else {
13266                 /* Now read the physical PHY_ID from the chip and verify
13267                  * that it is sane.  If it doesn't look good, we fall back
13268                  * to the PHY_ID found in the eeprom area and, failing
13269                  * that, the hard-coded subsystem-ID table.
13270                  */
13271                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13272                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13273
13274                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13275                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13276                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13277
13278                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13279         }
13280
13281         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13282                 tp->phy_id = hw_phy_id;
13283                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13284                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13285                 else
13286                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13287         } else {
13288                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13289                         /* Do nothing, phy ID already set up in
13290                          * tg3_get_eeprom_hw_cfg().
13291                          */
13292                 } else {
13293                         struct subsys_tbl_ent *p;
13294
13295                         /* No eeprom signature?  Try the hardcoded
13296                          * subsys device table.
13297                          */
13298                         p = tg3_lookup_by_subsys(tp);
13299                         if (!p)
13300                                 return -ENODEV;
13301
13302                         tp->phy_id = p->phy_id;
13303                         if (!tp->phy_id ||
13304                             tp->phy_id == TG3_PHY_ID_BCM8002)
13305                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13306                 }
13307         }
13308
13309         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13310             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13311              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13312              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13313               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13314              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13315               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13316                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13317
13318         tg3_phy_init_link_config(tp);
13319
13320         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13321             !tg3_flag(tp, ENABLE_APE) &&
13322             !tg3_flag(tp, ENABLE_ASF)) {
13323                 u32 bmsr, mask;
13324
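                /* BMSR latches link-down events; read it twice so the
                 * second read reflects the current link state.
                 */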
13325                 tg3_readphy(tp, MII_BMSR, &bmsr);
13326                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13327                     (bmsr & BMSR_LSTATUS))
13328                         goto skip_phy_reset;
13329
13330                 err = tg3_phy_reset(tp);
13331                 if (err)
13332                         return err;
13333
13334                 tg3_phy_set_wirespeed(tp);
13335
13336                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13337                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13338                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13339                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13340                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13341                                             tp->link_config.flowctrl);
13342
13343                         tg3_writephy(tp, MII_BMCR,
13344                                      BMCR_ANENABLE | BMCR_ANRESTART);
13345                 }
13346         }
13347
13348 skip_phy_reset:
13349         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
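                /* Note: the 5401 DSP setup below is deliberately issued
                 * twice; (assumption) the retry covers parts where the
                 * first write sequence does not take.
                 */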
13350                 err = tg3_init_5401phy_dsp(tp);
13351                 if (err)
13352                         return err;
13353
13354                 err = tg3_init_5401phy_dsp(tp);
13355         }
13356
13357         return err;
13358 }
13359
13360 static void __devinit tg3_read_vpd(struct tg3 *tp)
13361 {
13362         u8 *vpd_data;
13363         unsigned int block_end, rosize, len;
13364         u32 vpdlen;
13365         int j, i = 0;
13366
13367         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13368         if (!vpd_data)
13369                 goto out_no_vpd;
13370
13371         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13372         if (i < 0)
13373                 goto out_not_found;
13374
13375         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13376         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13377         i += PCI_VPD_LRDT_TAG_SIZE;
13378
13379         if (block_end > vpdlen)
13380                 goto out_not_found;
13381
13382         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13383                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13384         if (j > 0) {
13385                 len = pci_vpd_info_field_size(&vpd_data[j]);
13386
13387                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13388                 if (j + len > block_end || len != 4 ||
13389                     memcmp(&vpd_data[j], "1028", 4))
13390                         goto partno;
13391
13392                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13393                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13394                 if (j < 0)
13395                         goto partno;
13396
13397                 len = pci_vpd_info_field_size(&vpd_data[j]);
13398
13399                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13400                 if (j + len > block_end || len >= TG3_VER_SIZE)
13401                         goto partno;
13402
13403                 memcpy(tp->fw_ver, &vpd_data[j], len);
13404                 strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
13405         }
13406
13407 partno:
13408         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13409                                       PCI_VPD_RO_KEYWORD_PARTNO);
13410         if (i < 0)
13411                 goto out_not_found;
13412
13413         len = pci_vpd_info_field_size(&vpd_data[i]);
13414
13415         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13416         if (len > TG3_BPN_SIZE ||
13417             (len + i) > vpdlen)
13418                 goto out_not_found;
13419
13420         memcpy(tp->board_part_number, &vpd_data[i], len);
13421
13422 out_not_found:
13423         kfree(vpd_data);
13424         if (tp->board_part_number[0])
13425                 return;
13426
13427 out_no_vpd:
13428         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13429                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13430                         strcpy(tp->board_part_number, "BCM5717");
13431                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13432                         strcpy(tp->board_part_number, "BCM5718");
13433                 else
13434                         goto nomatch;
13435         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13436                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13437                         strcpy(tp->board_part_number, "BCM57780");
13438                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13439                         strcpy(tp->board_part_number, "BCM57760");
13440                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13441                         strcpy(tp->board_part_number, "BCM57790");
13442                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13443                         strcpy(tp->board_part_number, "BCM57788");
13444                 else
13445                         goto nomatch;
13446         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13447                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13448                         strcpy(tp->board_part_number, "BCM57761");
13449                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13450                         strcpy(tp->board_part_number, "BCM57765");
13451                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13452                         strcpy(tp->board_part_number, "BCM57781");
13453                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13454                         strcpy(tp->board_part_number, "BCM57785");
13455                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13456                         strcpy(tp->board_part_number, "BCM57791");
13457                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13458                         strcpy(tp->board_part_number, "BCM57795");
13459                 else
13460                         goto nomatch;
13461         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13462                 strcpy(tp->board_part_number, "BCM95906");
13463         } else {
13464 nomatch:
13465                 strcpy(tp->board_part_number, "none");
13466         }
13467 }
13468
13469 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13470 {
13471         u32 val;
13472
13473         if (tg3_nvram_read(tp, offset, &val) ||
13474             (val & 0xfc000000) != 0x0c000000 ||
13475             tg3_nvram_read(tp, offset + 4, &val) ||
13476             val != 0)
13477                 return 0;
13478
13479         return 1;
13480 }
13481
13482 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13483 {
13484         u32 val, offset, start, ver_offset;
13485         int i, dst_off;
13486         bool newver = false;
13487
13488         if (tg3_nvram_read(tp, 0xc, &offset) ||
13489             tg3_nvram_read(tp, 0x4, &start))
13490                 return;
13491
13492         offset = tg3_nvram_logical_addr(tp, offset);
13493
13494         if (tg3_nvram_read(tp, offset, &val))
13495                 return;
13496
13497         if ((val & 0xfc000000) == 0x0c000000) {
13498                 if (tg3_nvram_read(tp, offset + 4, &val))
13499                         return;
13500
13501                 if (val == 0)
13502                         newver = true;
13503         }
13504
13505         dst_off = strlen(tp->fw_ver);
13506
13507         if (newver) {
13508                 if (TG3_VER_SIZE - dst_off < 16 ||
13509                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13510                         return;
13511
13512                 offset = offset + ver_offset - start;
13513                 for (i = 0; i < 16; i += 4) {
13514                         __be32 v;
13515                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13516                                 return;
13517
13518                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13519                 }
13520         } else {
13521                 u32 major, minor;
13522
13523                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13524                         return;
13525
13526                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13527                         TG3_NVM_BCVER_MAJSFT;
13528                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13529                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13530                          "v%d.%02d", major, minor);
13531         }
13532 }
13533
13534 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13535 {
13536         u32 val, major, minor;
13537
13538         /* Use native endian representation */
13539         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13540                 return;
13541
13542         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13543                 TG3_NVM_HWSB_CFG1_MAJSFT;
13544         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13545                 TG3_NVM_HWSB_CFG1_MINSFT;
13546
13547         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13548 }
13549
13550 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13551 {
13552         u32 offset, major, minor, build;
13553
13554         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13555
13556         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13557                 return;
13558
13559         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13560         case TG3_EEPROM_SB_REVISION_0:
13561                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13562                 break;
13563         case TG3_EEPROM_SB_REVISION_2:
13564                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13565                 break;
13566         case TG3_EEPROM_SB_REVISION_3:
13567                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13568                 break;
13569         case TG3_EEPROM_SB_REVISION_4:
13570                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13571                 break;
13572         case TG3_EEPROM_SB_REVISION_5:
13573                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13574                 break;
13575         case TG3_EEPROM_SB_REVISION_6:
13576                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13577                 break;
13578         default:
13579                 return;
13580         }
13581
13582         if (tg3_nvram_read(tp, offset, &val))
13583                 return;
13584
13585         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13586                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13587         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13588                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13589         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13590
13591         if (minor > 99 || build > 26)
13592                 return;
13593
13594         offset = strlen(tp->fw_ver);
13595         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13596                  " v%d.%02d", major, minor);
13597
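        /* Builds 1..26 are appended as a single letter 'a'..'z'. */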
13598         if (build > 0) {
13599                 offset = strlen(tp->fw_ver);
13600                 if (offset < TG3_VER_SIZE - 1)
13601                         tp->fw_ver[offset] = 'a' + build - 1;
13602         }
13603 }
13604
13605 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13606 {
13607         u32 val, offset, start;
13608         int i, vlen;
13609
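        /* Walk the NVRAM directory for the ASF-init entry. */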
13610         for (offset = TG3_NVM_DIR_START;
13611              offset < TG3_NVM_DIR_END;
13612              offset += TG3_NVM_DIRENT_SIZE) {
13613                 if (tg3_nvram_read(tp, offset, &val))
13614                         return;
13615
13616                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13617                         break;
13618         }
13619
13620         if (offset == TG3_NVM_DIR_END)
13621                 return;
13622
13623         if (!tg3_flag(tp, 5705_PLUS))
13624                 start = 0x08000000;
13625         else if (tg3_nvram_read(tp, offset - 4, &start))
13626                 return;
13627
13628         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13629             !tg3_fw_img_is_valid(tp, offset) ||
13630             tg3_nvram_read(tp, offset + 8, &val))
13631                 return;
13632
13633         offset += val - start;
13634
13635         vlen = strlen(tp->fw_ver);
13636
13637         tp->fw_ver[vlen++] = ',';
13638         tp->fw_ver[vlen++] = ' ';
13639
13640         for (i = 0; i < 4; i++) {
13641                 __be32 v;
13642                 if (tg3_nvram_read_be32(tp, offset, &v))
13643                         return;
13644
13645                 offset += sizeof(v);
13646
13647                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13648                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13649                         break;
13650                 }
13651
13652                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13653                 vlen += sizeof(v);
13654         }
13655 }
13656
13657 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13658 {
13659         int vlen;
13660         u32 apedata;
13661         char *fwtype;
13662
13663         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13664                 return;
13665
13666         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13667         if (apedata != APE_SEG_SIG_MAGIC)
13668                 return;
13669
13670         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13671         if (!(apedata & APE_FW_STATUS_READY))
13672                 return;
13673
13674         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13675
13676         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13677                 tg3_flag_set(tp, APE_HAS_NCSI);
13678                 fwtype = "NCSI";
13679         } else {
13680                 fwtype = "DASH";
13681         }
13682
13683         vlen = strlen(tp->fw_ver);
13684
13685         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13686                  fwtype,
13687                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13688                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13689                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13690                  (apedata & APE_FW_VERSION_BLDMSK));
13691 }
13692
13693 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13694 {
13695         u32 val;
13696         bool vpd_vers = false;
13697
13698         if (tp->fw_ver[0] != 0)
13699                 vpd_vers = true;
13700
13701         if (tg3_flag(tp, NO_NVRAM)) {
13702                 strcat(tp->fw_ver, "sb");
13703                 return;
13704         }
13705
13706         if (tg3_nvram_read(tp, 0, &val))
13707                 return;
13708
13709         if (val == TG3_EEPROM_MAGIC)
13710                 tg3_read_bc_ver(tp);
13711         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13712                 tg3_read_sb_ver(tp, val);
13713         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13714                 tg3_read_hwsb_ver(tp);
13715         else
13716                 return;
13717
13718         if (vpd_vers)
13719                 goto done;
13720
13721         if (tg3_flag(tp, ENABLE_APE)) {
13722                 if (tg3_flag(tp, ENABLE_ASF))
13723                         tg3_read_dash_ver(tp);
13724         } else if (tg3_flag(tp, ENABLE_ASF)) {
13725                 tg3_read_mgmtfw_ver(tp);
13726         }
13727
13728 done:
13729         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13730 }
13731
13732 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13733
13734 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13735 {
13736         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13737                 return TG3_RX_RET_MAX_SIZE_5717;
13738         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13739                 return TG3_RX_RET_MAX_SIZE_5700;
13740         else
13741                 return TG3_RX_RET_MAX_SIZE_5705;
13742 }
13743
13744 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13745         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13746         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13747         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13748         { },
13749 };
13750
13751 static int __devinit tg3_get_invariants(struct tg3 *tp)
13752 {
13753         u32 misc_ctrl_reg;
13754         u32 pci_state_reg, grc_misc_cfg;
13755         u32 val;
13756         u16 pci_cmd;
13757         int err;
13758
13759         /* Force memory write invalidate off.  If we leave it on,
13760          * then on 5700_BX chips we have to enable a workaround.
13761          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13762          * to match the cacheline size.  The Broadcom driver has this
13763          * workaround but turns MWI off all the time, so it never
13764          * uses it.  This suggests that the workaround is insufficient.
13765          */
13766         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13767         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13768         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13769
13770         /* Important! -- Make sure register accesses are byteswapped
13771          * correctly.  Also, for those chips that require it, make
13772          * sure that indirect register accesses are enabled before
13773          * the first operation.
13774          */
13775         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13776                               &misc_ctrl_reg);
13777         tp->misc_host_ctrl |= (misc_ctrl_reg &
13778                                MISC_HOST_CTRL_CHIPREV);
13779         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13780                                tp->misc_host_ctrl);
13781
13782         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13783                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13784         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13785                 u32 prod_id_asic_rev;
13786
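                /* Newer parts latch the ASIC revision in a product-ID
                 * register; which register depends on the device family.
                 */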
13787                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13788                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13789                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13790                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13791                         pci_read_config_dword(tp->pdev,
13792                                               TG3PCI_GEN2_PRODID_ASICREV,
13793                                               &prod_id_asic_rev);
13794                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13795                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13796                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13797                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13798                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13799                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13800                         pci_read_config_dword(tp->pdev,
13801                                               TG3PCI_GEN15_PRODID_ASICREV,
13802                                               &prod_id_asic_rev);
13803                 else
13804                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13805                                               &prod_id_asic_rev);
13806
13807                 tp->pci_chip_rev_id = prod_id_asic_rev;
13808         }
13809
13810         /* Wrong chip ID in 5752 A0. This code can be removed later
13811          * as A0 is not in production.
13812          */
13813         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13814                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13815
13816         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13817          * we need to disable memory and use configuration cycles
13818          * only to access all registers. The 5702/03 chips
13819          * can mistakenly decode the special cycles from the
13820          * ICH chipsets as memory write cycles, causing corruption
13821          * of register and memory space. Only certain ICH bridges
13822          * will drive special cycles with non-zero data during the
13823          * address phase which can fall within the 5703's address
13824          * range. This is not an ICH bug as the PCI spec allows
13825          * non-zero address during special cycles. However, only
13826          * these ICH bridges are known to drive non-zero addresses
13827          * during special cycles.
13828          *
13829          * Since special cycles do not cross PCI bridges, we only
13830          * enable this workaround if the 5703 is on the secondary
13831          * bus of these ICH bridges.
13832          */
13833         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13834             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13835                 static struct tg3_dev_id {
13836                         u32     vendor;
13837                         u32     device;
13838                         u32     rev;
13839                 } ich_chipsets[] = {
13840                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13841                           PCI_ANY_ID },
13842                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13843                           PCI_ANY_ID },
13844                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13845                           0xa },
13846                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13847                           PCI_ANY_ID },
13848                         { },
13849                 };
13850                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13851                 struct pci_dev *bridge = NULL;
13852
13853                 while (pci_id->vendor != 0) {
13854                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13855                                                 bridge);
13856                         if (!bridge) {
13857                                 pci_id++;
13858                                 continue;
13859                         }
13860                         if (pci_id->rev != PCI_ANY_ID) {
13861                                 if (bridge->revision > pci_id->rev)
13862                                         continue;
13863                         }
13864                         if (bridge->subordinate &&
13865                             (bridge->subordinate->number ==
13866                              tp->pdev->bus->number)) {
13867                                 tg3_flag_set(tp, ICH_WORKAROUND);
13868                                 pci_dev_put(bridge);
13869                                 break;
13870                         }
13871                 }
13872         }
13873
13874         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13875                 static struct tg3_dev_id {
13876                         u32     vendor;
13877                         u32     device;
13878                 } bridge_chipsets[] = {
13879                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13880                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13881                         { },
13882                 };
13883                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13884                 struct pci_dev *bridge = NULL;
13885
13886                 while (pci_id->vendor != 0) {
13887                         bridge = pci_get_device(pci_id->vendor,
13888                                                 pci_id->device,
13889                                                 bridge);
13890                         if (!bridge) {
13891                                 pci_id++;
13892                                 continue;
13893                         }
13894                         if (bridge->subordinate &&
13895                             (bridge->subordinate->number <=
13896                              tp->pdev->bus->number) &&
13897                             (bridge->subordinate->subordinate >=
13898                              tp->pdev->bus->number)) {
13899                                 tg3_flag_set(tp, 5701_DMA_BUG);
13900                                 pci_dev_put(bridge);
13901                                 break;
13902                         }
13903                 }
13904         }
13905
13906         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13907          * DMA addresses > 40-bit.  This bridge may have additional
13908          * 57xx devices behind it in some 4-port NIC designs, for example.
13909          * Any tg3 device found behind the bridge will also need the 40-bit
13910          * DMA workaround.
13911          */
13912         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13913             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13914                 tg3_flag_set(tp, 5780_CLASS);
13915                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13916                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13917         } else {
13918                 struct pci_dev *bridge = NULL;
13919
13920                 do {
13921                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13922                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13923                                                 bridge);
13924                         if (bridge && bridge->subordinate &&
13925                             (bridge->subordinate->number <=
13926                              tp->pdev->bus->number) &&
13927                             (bridge->subordinate->subordinate >=
13928                              tp->pdev->bus->number)) {
13929                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13930                                 pci_dev_put(bridge);
13931                                 break;
13932                         }
13933                 } while (bridge);
13934         }
13935
13936         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13937             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13938                 tp->pdev_peer = tg3_find_peer(tp);
13939
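              /* The chip class flags set below nest: each *_PLUS flag
               * implies the next, so 5717_PLUS => 57765_PLUS => 5755_PLUS
               * => 5750_PLUS => 5705_PLUS.
               */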
13940         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13941             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13942             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13943                 tg3_flag_set(tp, 5717_PLUS);
13944
13945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13946             tg3_flag(tp, 5717_PLUS))
13947                 tg3_flag_set(tp, 57765_PLUS);
13948
13949         /* Intentionally exclude ASIC_REV_5906 */
13950         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13951             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13952             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13953             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13954             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13955             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13956             tg3_flag(tp, 57765_PLUS))
13957                 tg3_flag_set(tp, 5755_PLUS);
13958
13959         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13960             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13961             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13962             tg3_flag(tp, 5755_PLUS) ||
13963             tg3_flag(tp, 5780_CLASS))
13964                 tg3_flag_set(tp, 5750_PLUS);
13965
13966         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13967             tg3_flag(tp, 5750_PLUS))
13968                 tg3_flag_set(tp, 5705_PLUS);
13969
13970         /* Determine TSO capabilities */
13971         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13972                 ; /* Do nothing. HW bug. */
13973         else if (tg3_flag(tp, 57765_PLUS))
13974                 tg3_flag_set(tp, HW_TSO_3);
13975         else if (tg3_flag(tp, 5755_PLUS) ||
13976                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13977                 tg3_flag_set(tp, HW_TSO_2);
13978         else if (tg3_flag(tp, 5750_PLUS)) {
13979                 tg3_flag_set(tp, HW_TSO_1);
13980                 tg3_flag_set(tp, TSO_BUG);
13981                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13982                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13983                         tg3_flag_clear(tp, TSO_BUG);
13984         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13985                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13986                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13987                 tg3_flag_set(tp, TSO_BUG);
13988                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13989                         tp->fw_needed = FIRMWARE_TG3TSO5;
13990                 else
13991                         tp->fw_needed = FIRMWARE_TG3TSO;
13992         }
13993
13994         /* Selectively allow TSO based on operating conditions */
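              /* (Firmware-based TSO cannot be used while ASF management
               * firmware is active, hence the fw_needed && !ENABLE_ASF
               * test.)
               */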
13995         if (tg3_flag(tp, HW_TSO_1) ||
13996             tg3_flag(tp, HW_TSO_2) ||
13997             tg3_flag(tp, HW_TSO_3) ||
13998             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13999                 tg3_flag_set(tp, TSO_CAPABLE);
14000         else {
14001                 tg3_flag_clear(tp, TSO_CAPABLE);
14002                 tg3_flag_clear(tp, TSO_BUG);
14003                 tp->fw_needed = NULL;
14004         }
14005
14006         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14007                 tp->fw_needed = FIRMWARE_TG3;
14008
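              /* Default to a single interrupt vector; MSI-X capable chips
               * raise this to TG3_IRQ_MAX_VECS below.
               */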
14009         tp->irq_max = 1;
14010
14011         if (tg3_flag(tp, 5750_PLUS)) {
14012                 tg3_flag_set(tp, SUPPORT_MSI);
14013                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14014                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14015                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14016                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14017                      tp->pdev_peer == tp->pdev))
14018                         tg3_flag_clear(tp, SUPPORT_MSI);
14019
14020                 if (tg3_flag(tp, 5755_PLUS) ||
14021                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14022                         tg3_flag_set(tp, 1SHOT_MSI);
14023                 }
14024
14025                 if (tg3_flag(tp, 57765_PLUS)) {
14026                         tg3_flag_set(tp, SUPPORT_MSIX);
14027                         tp->irq_max = TG3_IRQ_MAX_VECS;
14028                 }
14029         }
14030
14031         if (tg3_flag(tp, 5755_PLUS))
14032                 tg3_flag_set(tp, SHORT_DMA_BUG);
14033
14034         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14035                 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14036
14037         if (tg3_flag(tp, 5717_PLUS))
14038                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14039
14040         if (tg3_flag(tp, 57765_PLUS) &&
14041             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14042                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14043
14044         if (!tg3_flag(tp, 5705_PLUS) ||
14045             tg3_flag(tp, 5780_CLASS) ||
14046             tg3_flag(tp, USE_JUMBO_BDFLAG))
14047                 tg3_flag_set(tp, JUMBO_CAPABLE);
14048
14049         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14050                               &pci_state_reg);
14051
14052         if (pci_is_pcie(tp->pdev)) {
14053                 u16 lnkctl;
14054
14055                 tg3_flag_set(tp, PCI_EXPRESS);
14056
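                      /* Use a 4096-byte PCIe maximum read request size by
                       * default; 5719 and 5720 are restricted to 2048.
                       */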
14057                 tp->pcie_readrq = 4096;
14058                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14059                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14060                         tp->pcie_readrq = 2048;
14061
14062                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14063
14064                 pci_read_config_word(tp->pdev,
14065                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14066                                      &lnkctl);
14067                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14068                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14069                             ASIC_REV_5906) {
14070                                 tg3_flag_clear(tp, HW_TSO_2);
14071                                 tg3_flag_clear(tp, TSO_CAPABLE);
14072                         }
14073                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14074                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14075                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14076                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14077                                 tg3_flag_set(tp, CLKREQ_BUG);
14078                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14079                         tg3_flag_set(tp, L1PLLPD_EN);
14080                 }
14081         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14082                 /* BCM5785 devices are effectively PCIe devices, and should
14083                  * follow PCIe codepaths, but do not have a PCIe capabilities
14084                  * section.
14085                  */
14086                 tg3_flag_set(tp, PCI_EXPRESS);
14087         } else if (!tg3_flag(tp, 5705_PLUS) ||
14088                    tg3_flag(tp, 5780_CLASS)) {
14089                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14090                 if (!tp->pcix_cap) {
14091                         dev_err(&tp->pdev->dev,
14092                                 "Cannot find PCI-X capability, aborting\n");
14093                         return -EIO;
14094                 }
14095
14096                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14097                         tg3_flag_set(tp, PCIX_MODE);
14098         }
14099
14100         /* If we have an AMD 762 or VIA K8T800 chipset, write
14101          * reordering to the mailbox registers done by the host
14102          * controller can cause major troubles.  We read back from
14103          * controller can cause major trouble.  We read back from
14104          * posted to the chip in order.
14105          */
14106         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14107             !tg3_flag(tp, PCI_EXPRESS))
14108                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14109
14110         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14111                              &tp->pci_cacheline_sz);
14112         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14113                              &tp->pci_lat_timer);
14114         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14115             tp->pci_lat_timer < 64) {
14116                 tp->pci_lat_timer = 64;
14117                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14118                                       tp->pci_lat_timer);
14119         }
14120
14121         /* Important! -- It is critical that the PCI-X hw workaround
14122          * situation is decided before the first MMIO register access.
14123          */
14124         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14125                 /* 5700 BX chips need to have their TX producer index
14126                  * mailboxes written twice to workaround a bug.
14127                  */
14128                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14129
14130                 /* If we are in PCI-X mode, enable register write workaround.
14131                  *
14132                  * The workaround is to use indirect register accesses
14133                  * for all chip writes not to mailbox registers.
14134                  */
14135                 if (tg3_flag(tp, PCIX_MODE)) {
14136                         u32 pm_reg;
14137
14138                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14139
14140                         /* The chip can have its power management PCI config
14141                          * space registers clobbered due to this bug.
14142                          * So explicitly force the chip into D0 here.
14143                          */
14144                         pci_read_config_dword(tp->pdev,
14145                                               tp->pm_cap + PCI_PM_CTRL,
14146                                               &pm_reg);
14147                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14148                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14149                         pci_write_config_dword(tp->pdev,
14150                                                tp->pm_cap + PCI_PM_CTRL,
14151                                                pm_reg);
14152
14153                         /* Also, force SERR#/PERR# in PCI command. */
14154                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14155                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14156                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14157                 }
14158         }
14159
14160         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14161                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14162         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14163                 tg3_flag_set(tp, PCI_32BIT);
14164
14165         /* Chip-specific fixup from Broadcom driver */
14166         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14167             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14168                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14169                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14170         }
14171
14172         /* Default fast path register access methods */
14173         tp->read32 = tg3_read32;
14174         tp->write32 = tg3_write32;
14175         tp->read32_mbox = tg3_read32;
14176         tp->write32_mbox = tg3_write32;
14177         tp->write32_tx_mbox = tg3_write32;
14178         tp->write32_rx_mbox = tg3_write32;
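              /* The tr32()/tw32() accessors used throughout the driver
               * dispatch through these function pointers (see tg3.h), so
               * the workarounds installed below take effect everywhere.
               */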
14179
14180         /* Various workaround register access methods */
14181         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14182                 tp->write32 = tg3_write_indirect_reg32;
14183         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14184                  (tg3_flag(tp, PCI_EXPRESS) &&
14185                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14186                 /*
14187                  * Back to back register writes can cause problems on these
14188                  * chips; the workaround is to read back all reg writes
14189                  * except those to mailbox regs.
14190                  *
14191                  * See tg3_write_indirect_reg32().
14192                  */
14193                 tp->write32 = tg3_write_flush_reg32;
14194         }
14195
14196         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14197                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14198                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14199                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14200         }
14201
14202         if (tg3_flag(tp, ICH_WORKAROUND)) {
14203                 tp->read32 = tg3_read_indirect_reg32;
14204                 tp->write32 = tg3_write_indirect_reg32;
14205                 tp->read32_mbox = tg3_read_indirect_mbox;
14206                 tp->write32_mbox = tg3_write_indirect_mbox;
14207                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14208                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14209
14210                 iounmap(tp->regs);
14211                 tp->regs = NULL;
14212
14213                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14214                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14215                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14216         }
14217         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14218                 tp->read32_mbox = tg3_read32_mbox_5906;
14219                 tp->write32_mbox = tg3_write32_mbox_5906;
14220                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14221                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14222         }
14223
14224         if (tp->write32 == tg3_write_indirect_reg32 ||
14225             (tg3_flag(tp, PCIX_MODE) &&
14226              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14227               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14228                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14229
14230         /* The memory arbiter has to be enabled in order for SRAM accesses
14231          * to succeed.  Normally on powerup the tg3 chip firmware will make
14232          * sure it is enabled, but other entities such as system netboot
14233          * code might disable it.
14234          */
14235         val = tr32(MEMARB_MODE);
14236         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14237
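              /* Determine which PCI function this device is.  PCI-X chips
               * report it in the PCI-X status register rather than devfn.
               */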
14238         if (tg3_flag(tp, PCIX_MODE)) {
14239                 pci_read_config_dword(tp->pdev,
14240                                       tp->pcix_cap + PCI_X_STATUS, &val);
14241                 tp->pci_fn = val & 0x7;
14242         } else {
14243                 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14244         }
14245
14246         /* Get eeprom hw config before calling tg3_set_power_state().
14247          * In particular, the TG3_FLAG_IS_NIC flag must be
14248          * determined before calling tg3_set_power_state() so that
14249          * we know whether or not to switch out of Vaux power.
14250          * When the flag is set, it means that GPIO1 is used for eeprom
14251          * write protect and also implies that it is a LOM where GPIOs
14252          * are not used to switch power.
14253          */
14254         tg3_get_eeprom_hw_cfg(tp);
14255
14256         if (tg3_flag(tp, ENABLE_APE)) {
14257                 /* Allow reads and writes to the
14258                  * APE register and memory space.
14259                  */
14260                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14261                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14262                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14263                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14264                                        pci_state_reg);
14265
14266                 tg3_ape_lock_init(tp);
14267         }
14268
14269         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14270             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14271             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14272             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14273             tg3_flag(tp, 57765_PLUS))
14274                 tg3_flag_set(tp, CPMU_PRESENT);
14275
14276         /* Set up tp->grc_local_ctrl before calling
14277          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14278          * will bring 5700's external PHY out of reset.
14279          * It is also used as eeprom write protect on LOMs.
14280          */
14281         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14282         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14283             tg3_flag(tp, EEPROM_WRITE_PROT))
14284                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14285                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14286         /* Unused GPIO3 must be driven as output on 5752 because there
14287          * are no pull-up resistors on unused GPIO pins.
14288          */
14289         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14290                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14291
14292         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14293             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14294             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14295                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14296
14297         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14298             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14299                 /* Turn off the debug UART. */
14300                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14301                 if (tg3_flag(tp, IS_NIC))
14302                         /* Keep VMain power. */
14303                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14304                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14305         }
14306
14307         /* Switch out of Vaux if it is a NIC */
14308         tg3_pwrsrc_switch_to_vmain(tp);
14309
14310         /* Derive initial jumbo mode from MTU assigned in
14311          * ether_setup() via the alloc_etherdev() call
14312          */
14313         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14314                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14315
14316         /* Determine WakeOnLan speed to use. */
14317         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14318             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14319             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14320             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14321                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14322         } else {
14323                 tg3_flag_set(tp, WOL_SPEED_100MB);
14324         }
14325
14326         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14327                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14328
14329         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14330         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14331             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14332              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14333              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14334             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14335             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14336                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14337
14338         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14339             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14340                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14341         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14342                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14343
14344         if (tg3_flag(tp, 5705_PLUS) &&
14345             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14346             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14347             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14348             !tg3_flag(tp, 57765_PLUS)) {
14349                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14350                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14351                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14352                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14353                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14354                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14355                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14356                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14357                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14358                 } else
14359                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14360         }
14361
14362         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14363             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14364                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14365                 if (tp->phy_otp == 0)
14366                         tp->phy_otp = TG3_OTP_DEFAULT;
14367         }
14368
14369         if (tg3_flag(tp, CPMU_PRESENT))
14370                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14371         else
14372                 tp->mi_mode = MAC_MI_MODE_BASE;
14373
14374         tp->coalesce_mode = 0;
14375         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14376             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14377                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14378
14379         /* Set these bits to enable statistics workaround. */
14380         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14381             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14382             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14383                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14384                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14385         }
14386
14387         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14388             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14389                 tg3_flag_set(tp, USE_PHYLIB);
14390
14391         err = tg3_mdio_init(tp);
14392         if (err)
14393                 return err;
14394
14395         /* Initialize data/descriptor byte/word swapping. */
14396         val = tr32(GRC_MODE);
14397         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14398                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14399                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14400                         GRC_MODE_B2HRX_ENABLE |
14401                         GRC_MODE_HTX2B_ENABLE |
14402                         GRC_MODE_HOST_STACKUP);
14403         else
14404                 val &= GRC_MODE_HOST_STACKUP;
14405
14406         tw32(GRC_MODE, val | tp->grc_mode);
14407
14408         tg3_switch_clocks(tp);
14409
14410         /* Clear this out for sanity. */
14411         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14412
14413         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14414                               &pci_state_reg);
14415         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14416             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14417                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14418
14419                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14420                     chiprevid == CHIPREV_ID_5701_B0 ||
14421                     chiprevid == CHIPREV_ID_5701_B2 ||
14422                     chiprevid == CHIPREV_ID_5701_B5) {
14423                         void __iomem *sram_base;
14424
14425                         /* Write some dummy words into the SRAM status block
14426                          * area and see if they read back correctly.  If the
14427                          * readback is bad, force-enable the PCIX workaround.
14428                          */
14429                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14430
14431                         writel(0x00000000, sram_base);
14432                         writel(0x00000000, sram_base + 4);
14433                         writel(0xffffffff, sram_base + 4);
14434                         if (readl(sram_base) != 0x00000000)
14435                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14436                 }
14437         }
14438
14439         udelay(50);
14440         tg3_nvram_init(tp);
14441
14442         grc_misc_cfg = tr32(GRC_MISC_CFG);
14443         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14444
14445         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14446             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14447              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14448                 tg3_flag_set(tp, IS_5788);
14449
14450         if (!tg3_flag(tp, IS_5788) &&
14451             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14452                 tg3_flag_set(tp, TAGGED_STATUS);
14453         if (tg3_flag(tp, TAGGED_STATUS)) {
14454                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14455                                       HOSTCC_MODE_CLRTICK_TXBD);
14456
14457                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14458                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14459                                        tp->misc_host_ctrl);
14460         }
14461
14462         /* Preserve the APE MAC_MODE bits */
14463         if (tg3_flag(tp, ENABLE_APE))
14464                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14465         else
14466                 tp->mac_mode = 0;
14467
14468         /* these are limited to 10/100 only */
14469         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14470              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14471             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14472              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14473              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14474               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14475               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14476             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14477              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14478               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14479               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14480             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14481             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14482             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14483             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14484                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14485
14486         err = tg3_phy_probe(tp);
14487         if (err) {
14488                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14489                 /* ... but do not return immediately ... */
14490                 tg3_mdio_fini(tp);
14491         }
14492
14493         tg3_read_vpd(tp);
14494         tg3_read_fw_ver(tp);
14495
14496         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14497                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14498         } else {
14499                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14500                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14501                 else
14502                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14503         }
14504
14505         /* 5700 {AX,BX} chips have a broken status block link
14506          * change bit implementation, so we must use the
14507          * status register in those cases.
14508          */
14509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14510                 tg3_flag_set(tp, USE_LINKCHG_REG);
14511         else
14512                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14513
14514         /* The led_ctrl is set during tg3_phy_probe; here we might
14515          * have to force the link status polling mechanism based
14516          * upon subsystem IDs.
14517          */
14518         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14519             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14520             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14521                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14522                 tg3_flag_set(tp, USE_LINKCHG_REG);
14523         }
14524
14525         /* For all SERDES we poll the MAC status register. */
14526         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14527                 tg3_flag_set(tp, POLL_SERDES);
14528         else
14529                 tg3_flag_clear(tp, POLL_SERDES);
14530
14531         tp->rx_offset = NET_IP_ALIGN;
14532         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14534             tg3_flag(tp, PCIX_MODE)) {
14535                 tp->rx_offset = 0;
14536 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14537                 tp->rx_copy_thresh = ~(u16)0;
14538 #endif
14539         }
14540
14541         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14542         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14543         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14544
14545         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14546
14547         /* Increment the rx prod index on the rx std ring by at most
14548          * 8 for these chips to work around hw errata.
14549          */
14550         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14551             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14552             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14553                 tp->rx_std_max_post = 8;
14554
14555         if (tg3_flag(tp, ASPM_WORKAROUND))
14556                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14557                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14558
14559         return err;
14560 }
14561
14562 #ifdef CONFIG_SPARC
14563 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14564 {
14565         struct net_device *dev = tp->dev;
14566         struct pci_dev *pdev = tp->pdev;
14567         struct device_node *dp = pci_device_to_OF_node(pdev);
14568         const unsigned char *addr;
14569         int len;
14570
14571         addr = of_get_property(dp, "local-mac-address", &len);
14572         if (addr && len == 6) {
14573                 memcpy(dev->dev_addr, addr, 6);
14574                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14575                 return 0;
14576         }
14577         return -ENODEV;
14578 }
14579
14580 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14581 {
14582         struct net_device *dev = tp->dev;
14583
14584         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14585         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14586         return 0;
14587 }
14588 #endif
14589
14590 static int __devinit tg3_get_device_address(struct tg3 *tp)
14591 {
14592         struct net_device *dev = tp->dev;
14593         u32 hi, lo, mac_offset;
14594         int addr_ok = 0;
14595
14596 #ifdef CONFIG_SPARC
14597         if (!tg3_get_macaddr_sparc(tp))
14598                 return 0;
14599 #endif
14600
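              /* Pick the NVRAM offset holding this port/function's MAC
               * address; multi-port and multi-function chips apparently
               * keep one copy per port/function (note the 0xcc and +0x18c
               * offsets below).
               */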
14601         mac_offset = 0x7c;
14602         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14603             tg3_flag(tp, 5780_CLASS)) {
14604                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14605                         mac_offset = 0xcc;
14606                 if (tg3_nvram_lock(tp))
14607                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14608                 else
14609                         tg3_nvram_unlock(tp);
14610         } else if (tg3_flag(tp, 5717_PLUS)) {
14611                 if (tp->pci_fn & 1)
14612                         mac_offset = 0xcc;
14613                 if (tp->pci_fn > 1)
14614                         mac_offset += 0x18c;
14615         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14616                 mac_offset = 0x10;
14617
14618         /* First try to get it from MAC address mailbox. */
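              /* (0x484b is ASCII "HK", apparently the signature bootcode
               * leaves when the mailbox holds a valid address.)
               */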
14619         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14620         if ((hi >> 16) == 0x484b) {
14621                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14622                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14623
14624                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14625                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14626                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14627                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14628                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14629
14630                 /* Some old bootcode may report a 0 MAC address in SRAM */
14631                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14632         }
14633         if (!addr_ok) {
14634                 /* Next, try NVRAM. */
14635                 if (!tg3_flag(tp, NO_NVRAM) &&
14636                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14637                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14638                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14639                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14640                 }
14641                 /* Finally just fetch it out of the MAC control regs. */
14642                 else {
14643                         hi = tr32(MAC_ADDR_0_HIGH);
14644                         lo = tr32(MAC_ADDR_0_LOW);
14645
14646                         dev->dev_addr[5] = lo & 0xff;
14647                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14648                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14649                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14650                         dev->dev_addr[1] = hi & 0xff;
14651                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14652                 }
14653         }
14654
14655         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14656 #ifdef CONFIG_SPARC
14657                 if (!tg3_get_default_macaddr_sparc(tp))
14658                         return 0;
14659 #endif
14660                 return -EINVAL;
14661         }
14662         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14663         return 0;
14664 }
14665
14666 #define BOUNDARY_SINGLE_CACHELINE       1
14667 #define BOUNDARY_MULTI_CACHELINE        2
14668
14669 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14670 {
14671         int cacheline_size;
14672         u8 byte;
14673         int goal;
14674
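              /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence
               * the multiply by 4; a value of 0 means it was never
               * programmed, so assume a worst-case 1024-byte cacheline.
               */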
14675         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14676         if (byte == 0)
14677                 cacheline_size = 1024;
14678         else
14679                 cacheline_size = (int) byte * 4;
14680
14681         /* On 5703 and later chips, the boundary bits have no
14682          * effect.
14683          */
14684         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14685             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14686             !tg3_flag(tp, PCI_EXPRESS))
14687                 goto out;
14688
14689 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14690         goal = BOUNDARY_MULTI_CACHELINE;
14691 #else
14692 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14693         goal = BOUNDARY_SINGLE_CACHELINE;
14694 #else
14695         goal = 0;
14696 #endif
14697 #endif
14698
14699         if (tg3_flag(tp, 57765_PLUS)) {
14700                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14701                 goto out;
14702         }
14703
14704         if (!goal)
14705                 goto out;
14706
14707         /* PCI controllers on most RISC systems tend to disconnect
14708          * when a device tries to burst across a cache-line boundary.
14709          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14710          *
14711          * Unfortunately, for PCI-E there are only limited
14712          * write-side controls for this, and thus for reads
14713          * we will still get the disconnects.  We'll also waste
14714          * these PCI cycles for both read and write for chips
14715          * other than 5700 and 5701 which do not implement the
14716          * boundary bits.
14717          */
14718         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14719                 switch (cacheline_size) {
14720                 case 16:
14721                 case 32:
14722                 case 64:
14723                 case 128:
14724                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14725                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14726                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14727                         } else {
14728                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14729                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14730                         }
14731                         break;
14732
14733                 case 256:
14734                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14735                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14736                         break;
14737
14738                 default:
14739                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14740                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14741                         break;
14742                 }
14743         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14744                 switch (cacheline_size) {
14745                 case 16:
14746                 case 32:
14747                 case 64:
14748                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14749                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14750                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14751                                 break;
14752                         }
14753                         /* fallthrough */
14754                 case 128:
14755                 default:
14756                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14757                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14758                         break;
14759                 }
14760         } else {
14761                 switch (cacheline_size) {
14762                 case 16:
14763                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14764                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14765                                         DMA_RWCTRL_WRITE_BNDRY_16);
14766                                 break;
14767                         }
14768                         /* fallthrough */
14769                 case 32:
14770                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14771                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14772                                         DMA_RWCTRL_WRITE_BNDRY_32);
14773                                 break;
14774                         }
14775                         /* fallthrough */
14776                 case 64:
14777                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14778                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14779                                         DMA_RWCTRL_WRITE_BNDRY_64);
14780                                 break;
14781                         }
14782                         /* fallthrough */
14783                 case 128:
14784                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14785                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14786                                         DMA_RWCTRL_WRITE_BNDRY_128);
14787                                 break;
14788                         }
14789                         /* fallthrough */
14790                 case 256:
14791                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14792                                 DMA_RWCTRL_WRITE_BNDRY_256);
14793                         break;
14794                 case 512:
14795                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14796                                 DMA_RWCTRL_WRITE_BNDRY_512);
14797                         break;
14798                 case 1024:
14799                 default:
14800                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14801                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14802                         break;
14803                 }
14804         }
14805
14806 out:
14807         return val;
14808 }
14809
14810 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14811 {
14812         struct tg3_internal_buffer_desc test_desc;
14813         u32 sram_dma_descs;
14814         int i, ret;
14815
14816         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14817
14818         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14819         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14820         tw32(RDMAC_STATUS, 0);
14821         tw32(WDMAC_STATUS, 0);
14822
14823         tw32(BUFMGR_MODE, 0);
14824         tw32(FTQ_RESET, 0);
14825
14826         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14827         test_desc.addr_lo = buf_dma & 0xffffffff;
14828         test_desc.nic_mbuf = 0x00002100;
14829         test_desc.len = size;
14830
14831         /*
14832          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14833          * the *second* time the tg3 driver was getting loaded after an
14834          * initial scan.
14835          *
14836          * Broadcom tells me:
14837          *   ...the DMA engine is connected to the GRC block and a DMA
14838          *   reset may affect the GRC block in some unpredictable way...
14839          *   The behavior of resets to individual blocks has not been tested.
14840          *
14841          * Broadcom noted the GRC reset will also reset all sub-components.
14842          */
14843         if (to_device) {
14844                 test_desc.cqid_sqid = (13 << 8) | 2;
14845
14846                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14847                 udelay(40);
14848         } else {
14849                 test_desc.cqid_sqid = (16 << 8) | 7;
14850
14851                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14852                 udelay(40);
14853         }
14854         test_desc.flags = 0x00000005;
14855
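              /* Copy the descriptor into NIC SRAM one 32-bit word at a
               * time through the PCI config space memory window.
               */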
14856         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14857                 u32 val;
14858
14859                 val = *(((u32 *)&test_desc) + i);
14860                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14861                                        sram_dma_descs + (i * sizeof(u32)));
14862                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14863         }
14864         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14865
14866         if (to_device)
14867                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14868         else
14869                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14870
14871         ret = -ENODEV;
14872         for (i = 0; i < 40; i++) {
14873                 u32 val;
14874
14875                 if (to_device)
14876                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14877                 else
14878                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14879                 if ((val & 0xffff) == sram_dma_descs) {
14880                         ret = 0;
14881                         break;
14882                 }
14883
14884                 udelay(100);
14885         }
14886
14887         return ret;
14888 }
14889
14890 #define TEST_BUFFER_SIZE        0x2000
14891
14892 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14893         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14894         { },
14895 };
14896
14897 static int __devinit tg3_test_dma(struct tg3 *tp)
14898 {
14899         dma_addr_t buf_dma;
14900         u32 *buf, saved_dma_rwctrl;
14901         int ret = 0;
14902
14903         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14904                                  &buf_dma, GFP_KERNEL);
14905         if (!buf) {
14906                 ret = -ENOMEM;
14907                 goto out_nofree;
14908         }
14909
14910         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14911                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14912
14913         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14914
14915         if (tg3_flag(tp, 57765_PLUS))
14916                 goto out;
14917
14918         if (tg3_flag(tp, PCI_EXPRESS)) {
14919                 /* DMA read watermark not used on PCIE */
14920                 tp->dma_rwctrl |= 0x00180000;
14921         } else if (!tg3_flag(tp, PCIX_MODE)) {
14922                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14923                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14924                         tp->dma_rwctrl |= 0x003f0000;
14925                 else
14926                         tp->dma_rwctrl |= 0x003f000f;
14927         } else {
14928                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14929                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14930                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14931                         u32 read_water = 0x7;
14932
14933                         /* If the 5704 is behind the EPB bridge, we can
14934                          * do the less restrictive ONE_DMA workaround for
14935                          * better performance.
14936                          */
14937                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14938                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14939                                 tp->dma_rwctrl |= 0x8000;
14940                         else if (ccval == 0x6 || ccval == 0x7)
14941                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14942
14943                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14944                                 read_water = 4;
14945                         /* Set bit 23 to enable PCIX hw bug fix */
14946                         tp->dma_rwctrl |=
14947                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14948                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14949                                 (1 << 23);
14950                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14951                         /* 5780 always in PCIX mode */
14952                         tp->dma_rwctrl |= 0x00144000;
14953                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14954                         /* 5714 always in PCIX mode */
14955                         tp->dma_rwctrl |= 0x00148000;
14956                 } else {
14957                         tp->dma_rwctrl |= 0x001b000f;
14958                 }
14959         }
14960
14961         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14962             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14963                 tp->dma_rwctrl &= 0xfffffff0;
14964
14965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14966             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14967                 /* Remove this if it causes problems for some boards. */
14968                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14969
14970                 /* On 5700/5701 chips, we need to set this bit.
14971                  * Otherwise the chip will issue cacheline transactions
14972                  * to streamable DMA memory with not all the byte
14973                  * enables turned on.  This is an error on several
14974                  * RISC PCI controllers, in particular sparc64.
14975                  *
14976                  * On 5703/5704 chips, this bit has been reassigned
14977                  * a different meaning.  In particular, it is used
14978                  * on those chips to enable a PCI-X workaround.
14979                  */
14980                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14981         }
14982
14983         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14984
14985 #if 0
14986         /* Unneeded, already done by tg3_get_invariants.  */
14987         tg3_switch_clocks(tp);
14988 #endif
14989
14990         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14991             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14992                 goto out;
14993
14994         /* It is best to perform the DMA test with the maximum write burst
14995          * size in order to expose the 5700/5701 write DMA bug.
14996          */
14997         saved_dma_rwctrl = tp->dma_rwctrl;
14998         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14999         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15000
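              /* Test strategy: fill the buffer with a known pattern, DMA it
               * to the chip and back, and verify the result.  On a mismatch,
               * force the write boundary to 16 bytes once and retry; a second
               * mismatch is treated as a hard failure.
               */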
15001         while (1) {
15002                 u32 *p = buf, i;
15003
15004                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15005                         p[i] = i;
15006
15007                 /* Send the buffer to the chip. */
15008                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15009                 if (ret) {
15010                         dev_err(&tp->pdev->dev,
15011                                 "%s: Buffer write failed. err = %d\n",
15012                                 __func__, ret);
15013                         break;
15014                 }
15015
15016 #if 0
15017                 /* Validate that the data reached card RAM correctly. */
15018                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15019                         u32 val;
15020                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15021                         if (le32_to_cpu(val) != p[i]) {
15022                                 dev_err(&tp->pdev->dev,
15023                                         "%s: Buffer corrupted on device! "
15024                                         "(%d != %d)\n", __func__, val, i);
15025                                 /* ret = -ENODEV here? */
15026                         }
15027                         p[i] = 0;
15028                 }
15029 #endif
15030                 /* Now read it back. */
15031                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15032                 if (ret) {
15033                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15034                                 "err = %d\n", __func__, ret);
15035                         break;
15036                 }
15037
15038                 /* Verify it. */
15039                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15040                         if (p[i] == i)
15041                                 continue;
15042
15043                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15044                             DMA_RWCTRL_WRITE_BNDRY_16) {
15045                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15046                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15047                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15048                                 break;
15049                         } else {
15050                                 dev_err(&tp->pdev->dev,
15051                                         "%s: Buffer corrupted on read back! "
15052                                         "(%d != %d)\n", __func__, p[i], i);
15053                                 ret = -ENODEV;
15054                                 goto out;
15055                         }
15056                 }
15057
15058                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15059                         /* Success. */
15060                         ret = 0;
15061                         break;
15062                 }
15063         }
15064         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15065             DMA_RWCTRL_WRITE_BNDRY_16) {
15066                 /* DMA test passed without adjusting the DMA boundary;
15067                  * now look for chipsets that are known to expose the
15068                  * DMA bug without failing the test.
15069                  */
15070                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15071                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15072                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15073                 } else {
15074                         /* Safe to use the calculated DMA boundary. */
15075                         tp->dma_rwctrl = saved_dma_rwctrl;
15076                 }
15077
15078                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15079         }
15080
15081 out:
15082         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15083 out_nofree:
15084         return ret;
15085 }
15086
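      /* Choose default buffer-manager watermarks by chip family: the
       * 57765 class, the 5705 class (with 5906-specific MAC RX values),
       * or the original defaults for everything else.
       */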
15087 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15088 {
15089         if (tg3_flag(tp, 57765_PLUS)) {
15090                 tp->bufmgr_config.mbuf_read_dma_low_water =
15091                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15092                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15093                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15094                 tp->bufmgr_config.mbuf_high_water =
15095                         DEFAULT_MB_HIGH_WATER_57765;
15096
15097                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15098                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15099                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15100                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15101                 tp->bufmgr_config.mbuf_high_water_jumbo =
15102                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15103         } else if (tg3_flag(tp, 5705_PLUS)) {
15104                 tp->bufmgr_config.mbuf_read_dma_low_water =
15105                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15106                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15107                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15108                 tp->bufmgr_config.mbuf_high_water =
15109                         DEFAULT_MB_HIGH_WATER_5705;
15110                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15111                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15112                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15113                         tp->bufmgr_config.mbuf_high_water =
15114                                 DEFAULT_MB_HIGH_WATER_5906;
15115                 }
15116
15117                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15118                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15119                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15120                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15121                 tp->bufmgr_config.mbuf_high_water_jumbo =
15122                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15123         } else {
15124                 tp->bufmgr_config.mbuf_read_dma_low_water =
15125                         DEFAULT_MB_RDMA_LOW_WATER;
15126                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15127                         DEFAULT_MB_MACRX_LOW_WATER;
15128                 tp->bufmgr_config.mbuf_high_water =
15129                         DEFAULT_MB_HIGH_WATER;
15130
15131                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15132                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15133                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15134                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15135                 tp->bufmgr_config.mbuf_high_water_jumbo =
15136                         DEFAULT_MB_HIGH_WATER_JUMBO;
15137         }
15138
15139         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15140         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15141 }
15142
15143 static char * __devinit tg3_phy_string(struct tg3 *tp)
15144 {
15145         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15146         case TG3_PHY_ID_BCM5400:        return "5400";
15147         case TG3_PHY_ID_BCM5401:        return "5401";
15148         case TG3_PHY_ID_BCM5411:        return "5411";
15149         case TG3_PHY_ID_BCM5701:        return "5701";
15150         case TG3_PHY_ID_BCM5703:        return "5703";
15151         case TG3_PHY_ID_BCM5704:        return "5704";
15152         case TG3_PHY_ID_BCM5705:        return "5705";
15153         case TG3_PHY_ID_BCM5750:        return "5750";
15154         case TG3_PHY_ID_BCM5752:        return "5752";
15155         case TG3_PHY_ID_BCM5714:        return "5714";
15156         case TG3_PHY_ID_BCM5780:        return "5780";
15157         case TG3_PHY_ID_BCM5755:        return "5755";
15158         case TG3_PHY_ID_BCM5787:        return "5787";
15159         case TG3_PHY_ID_BCM5784:        return "5784";
15160         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15161         case TG3_PHY_ID_BCM5906:        return "5906";
15162         case TG3_PHY_ID_BCM5761:        return "5761";
15163         case TG3_PHY_ID_BCM5718C:       return "5718C";
15164         case TG3_PHY_ID_BCM5718S:       return "5718S";
15165         case TG3_PHY_ID_BCM57765:       return "57765";
15166         case TG3_PHY_ID_BCM5719C:       return "5719C";
15167         case TG3_PHY_ID_BCM5720C:       return "5720C";
15168         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15169         case 0:                 return "serdes";
15170         default:                return "unknown";
15171         }
15172 }
15173
15174 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15175 {
15176         if (tg3_flag(tp, PCI_EXPRESS)) {
15177                 strcpy(str, "PCI Express");
15178                 return str;
15179         } else if (tg3_flag(tp, PCIX_MODE)) {
15180                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15181
15182                 strcpy(str, "PCIX:");
15183
15184                 if ((clock_ctrl == 7) ||
15185                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15186                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15187                         strcat(str, "133MHz");
15188                 else if (clock_ctrl == 0)
15189                         strcat(str, "33MHz");
15190                 else if (clock_ctrl == 2)
15191                         strcat(str, "50MHz");
15192                 else if (clock_ctrl == 4)
15193                         strcat(str, "66MHz");
15194                 else if (clock_ctrl == 6)
15195                         strcat(str, "100MHz");
15196         } else {
15197                 strcpy(str, "PCI:");
15198                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15199                         strcat(str, "66MHz");
15200                 else
15201                         strcat(str, "33MHz");
15202         }
15203         if (tg3_flag(tp, PCI_32BIT))
15204                 strcat(str, ":32-bit");
15205         else
15206                 strcat(str, ":64-bit");
15207         return str;
15208 }
15209
15210 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15211 {
15212         struct pci_dev *peer;
15213         unsigned int func, devnr = tp->pdev->devfn & ~7;
15214
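              /* devfn & ~7 masks off the function number, so this scans all
               * eight possible functions in the slot looking for the other
               * port.
               */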
15215         for (func = 0; func < 8; func++) {
15216                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15217                 if (peer && peer != tp->pdev)
15218                         break;
15219                 pci_dev_put(peer);
15220         }
15221         /* The 5704 can be configured in single-port mode; set peer to
15222          * tp->pdev in that case.
15223          */
15224         if (!peer) {
15225                 peer = tp->pdev;
15226                 return peer;
15227         }
15228
15229         /*
15230          * We don't need to keep the refcount elevated; there's no way
15231          * to remove one half of this device without removing the other.
15232          */
15233         pci_dev_put(peer);
15234
15235         return peer;
15236 }
15237
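      /* Fill in the default ethtool coalescing parameters.  Chips from the
       * 5705 onward get their per-IRQ and statistics coalescing values
       * zeroed below, as those controls appear not to apply there.
       */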
15238 static void __devinit tg3_init_coal(struct tg3 *tp)
15239 {
15240         struct ethtool_coalesce *ec = &tp->coal;
15241
15242         memset(ec, 0, sizeof(*ec));
15243         ec->cmd = ETHTOOL_GCOALESCE;
15244         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15245         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15246         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15247         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15248         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15249         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15250         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15251         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15252         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15253
15254         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15255                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15256                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15257                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15258                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15259                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15260         }
15261
15262         if (tg3_flag(tp, 5705_PLUS)) {
15263                 ec->rx_coalesce_usecs_irq = 0;
15264                 ec->tx_coalesce_usecs_irq = 0;
15265                 ec->stats_block_coalesce_usecs = 0;
15266         }
15267 }
15268
15269 static const struct net_device_ops tg3_netdev_ops = {
15270         .ndo_open               = tg3_open,
15271         .ndo_stop               = tg3_close,
15272         .ndo_start_xmit         = tg3_start_xmit,
15273         .ndo_get_stats64        = tg3_get_stats64,
15274         .ndo_validate_addr      = eth_validate_addr,
15275         .ndo_set_rx_mode        = tg3_set_rx_mode,
15276         .ndo_set_mac_address    = tg3_set_mac_addr,
15277         .ndo_do_ioctl           = tg3_ioctl,
15278         .ndo_tx_timeout         = tg3_tx_timeout,
15279         .ndo_change_mtu         = tg3_change_mtu,
15280         .ndo_fix_features       = tg3_fix_features,
15281         .ndo_set_features       = tg3_set_features,
15282 #ifdef CONFIG_NET_POLL_CONTROLLER
15283         .ndo_poll_controller    = tg3_poll_controller,
15284 #endif
15285 };
15286
15287 static int __devinit tg3_init_one(struct pci_dev *pdev,
15288                                   const struct pci_device_id *ent)
15289 {
15290         struct net_device *dev;
15291         struct tg3 *tp;
15292         int i, err, pm_cap;
15293         u32 sndmbx, rcvmbx, intmbx;
15294         char str[40];
15295         u64 dma_mask, persist_dma_mask;
15296         u32 features = 0;
15297
15298         printk_once(KERN_INFO "%s\n", version);
15299
15300         err = pci_enable_device(pdev);
15301         if (err) {
15302                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15303                 return err;
15304         }
15305
15306         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15307         if (err) {
15308                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15309                 goto err_out_disable_pdev;
15310         }
15311
15312         pci_set_master(pdev);
15313
15314         /* Find power-management capability. */
15315         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15316         if (pm_cap == 0) {
15317                 dev_err(&pdev->dev,
15318                         "Cannot find Power Management capability, aborting\n");
15319                 err = -EIO;
15320                 goto err_out_free_res;
15321         }
15322
15323         err = pci_set_power_state(pdev, PCI_D0);
15324         if (err) {
15325                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15326                 goto err_out_free_res;
15327         }
15328
15329         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15330         if (!dev) {
15331                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15332                 err = -ENOMEM;
15333                 goto err_out_power_down;
15334         }
15335
15336         SET_NETDEV_DEV(dev, &pdev->dev);
15337
15338         tp = netdev_priv(dev);
15339         tp->pdev = pdev;
15340         tp->dev = dev;
15341         tp->pm_cap = pm_cap;
15342         tp->rx_mode = TG3_DEF_RX_MODE;
15343         tp->tx_mode = TG3_DEF_TX_MODE;
15344
15345         if (tg3_debug > 0)
15346                 tp->msg_enable = tg3_debug;
15347         else
15348                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15349
15350         /* The word/byte swap controls here control register access byte
15351          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15352          * setting below.
15353          */
15354         tp->misc_host_ctrl =
15355                 MISC_HOST_CTRL_MASK_PCI_INT |
15356                 MISC_HOST_CTRL_WORD_SWAP |
15357                 MISC_HOST_CTRL_INDIR_ACCESS |
15358                 MISC_HOST_CTRL_PCISTATE_RW;
15359
15360         /* The NONFRM (non-frame) byte/word swap controls take effect
15361          * on descriptor entries, i.e. anything which isn't packet data.
15362          *
15363          * The StrongARM chips on the board (one for tx, one for rx)
15364          * are running in big-endian mode.
15365          */
15366         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15367                         GRC_MODE_WSWAP_NONFRM_DATA);
15368 #ifdef __BIG_ENDIAN
15369         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15370 #endif
15371         spin_lock_init(&tp->lock);
15372         spin_lock_init(&tp->indirect_lock);
15373         INIT_WORK(&tp->reset_task, tg3_reset_task);
15374
15375         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15376         if (!tp->regs) {
15377                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15378                 err = -ENOMEM;
15379                 goto err_out_free_dev;
15380         }
15381
15382         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15383             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15384             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15385             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15386             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15387             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15388             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15389             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15390                 tg3_flag_set(tp, ENABLE_APE);
15391                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15392                 if (!tp->aperegs) {
15393                         dev_err(&pdev->dev,
15394                                 "Cannot map APE registers, aborting\n");
15395                         err = -ENOMEM;
15396                         goto err_out_iounmap;
15397                 }
15398         }
15399
15400         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15401         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15402
15403         dev->ethtool_ops = &tg3_ethtool_ops;
15404         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15405         dev->netdev_ops = &tg3_netdev_ops;
15406         dev->irq = pdev->irq;
15407
15408         err = tg3_get_invariants(tp);
15409         if (err) {
15410                 dev_err(&pdev->dev,
15411                         "Problem fetching invariants of chip, aborting\n");
15412                 goto err_out_apeunmap;
15413         }
15414
15415         /* The EPB bridge inside 5714, 5715, and 5780 and any
15416          * device behind the EPB cannot support DMA addresses > 40-bit.
15417          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15418          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15419          * do DMA address check in tg3_start_xmit().
15420          */
15421         if (tg3_flag(tp, IS_5788))
15422                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15423         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15424                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15425 #ifdef CONFIG_HIGHMEM
15426                 dma_mask = DMA_BIT_MASK(64);
15427 #endif
15428         } else
15429                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15430
15431         /* Configure DMA attributes. */
15432         if (dma_mask > DMA_BIT_MASK(32)) {
15433                 err = pci_set_dma_mask(pdev, dma_mask);
15434                 if (!err) {
15435                         features |= NETIF_F_HIGHDMA;
15436                         err = pci_set_consistent_dma_mask(pdev,
15437                                                           persist_dma_mask);
15438                         if (err < 0) {
15439                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15440                                         "DMA for consistent allocations\n");
15441                                 goto err_out_apeunmap;
15442                         }
15443                 }
15444         }
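              /* If the wide DMA mask could not be set (or was never
               * requested), fall back to 32-bit DMA for both streaming and
               * coherent allocations.
               */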
15445         if (err || dma_mask == DMA_BIT_MASK(32)) {
15446                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15447                 if (err) {
15448                         dev_err(&pdev->dev,
15449                                 "No usable DMA configuration, aborting\n");
15450                         goto err_out_apeunmap;
15451                 }
15452         }
15453
15454         tg3_init_bufmgr_config(tp);
15455
15456         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15457
15458         /* 5700 B0 chips do not support checksumming correctly due
15459          * to hardware bugs.
15460          */
15461         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15462                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15463
15464                 if (tg3_flag(tp, 5755_PLUS))
15465                         features |= NETIF_F_IPV6_CSUM;
15466         }
15467
15468         /* TSO is on by default on chips that support hardware TSO.
15469          * Firmware TSO on older chips gives lower performance, so it
15470          * is off by default, but can be enabled using ethtool.
15471          */
15472         if ((tg3_flag(tp, HW_TSO_1) ||
15473              tg3_flag(tp, HW_TSO_2) ||
15474              tg3_flag(tp, HW_TSO_3)) &&
15475             (features & NETIF_F_IP_CSUM))
15476                 features |= NETIF_F_TSO;
15477         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15478                 if (features & NETIF_F_IPV6_CSUM)
15479                         features |= NETIF_F_TSO6;
15480                 if (tg3_flag(tp, HW_TSO_3) ||
15481                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15482                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15483                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15484                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15485                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15486                         features |= NETIF_F_TSO_ECN;
15487         }
15488
15489         dev->features |= features;
15490         dev->vlan_features |= features;
15491
15492         /*
15493          * Add loopback capability only for a subset of devices that support
15494          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15495          * loopback for the remaining devices.
15496          */
15497         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15498             !tg3_flag(tp, CPMU_PRESENT))
15499                 /* Add the loopback capability */
15500                 features |= NETIF_F_LOOPBACK;
15501
15502         dev->hw_features |= features;
15503
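              /* On 5705 A1 parts without TSO on a slow PCI bus, cap the RX
               * ring at 64 descriptors (63 pending entries).
               */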
15504         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15505             !tg3_flag(tp, TSO_CAPABLE) &&
15506             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15507                 tg3_flag_set(tp, MAX_RXPEND_64);
15508                 tp->rx_pending = 63;
15509         }
15510
15511         err = tg3_get_device_address(tp);
15512         if (err) {
15513                 dev_err(&pdev->dev,
15514                         "Could not obtain valid ethernet address, aborting\n");
15515                 goto err_out_apeunmap;
15516         }
15517
15518         /*
15519          * Reset the chip in case an UNDI or EFI driver did not shut down
15520          * DMA.  Otherwise the DMA self test below will enable the WDMAC
15521          * and we'll see (spurious) pending DMA on the PCI bus at that point.
15522          */
15523         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15524             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15525                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15526                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15527         }
15528
15529         err = tg3_test_dma(tp);
15530         if (err) {
15531                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15532                 goto err_out_apeunmap;
15533         }
15534
15535         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15536         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15537         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15538         for (i = 0; i < tp->irq_max; i++) {
15539                 struct tg3_napi *tnapi = &tp->napi[i];
15540
15541                 tnapi->tp = tp;
15542                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15543
15544                 tnapi->int_mbox = intmbx;
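                      /* Judging by the increments below, the first four
                       * interrupt mailboxes sit at an 8-byte stride and the
                       * remaining ones at a 4-byte stride.
                       */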
15545                 if (i < 4)
15546                         intmbx += 0x8;
15547                 else
15548                         intmbx += 0x4;
15549
15550                 tnapi->consmbox = rcvmbx;
15551                 tnapi->prodmbox = sndmbx;
15552
15553                 if (i)
15554                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15555                 else
15556                         tnapi->coal_now = HOSTCC_MODE_NOW;
15557
15558                 if (!tg3_flag(tp, SUPPORT_MSIX))
15559                         break;
15560
15561                 /*
15562                  * If we support MSIX, we'll be using RSS.  If we're using
15563                  * RSS, the first vector only handles link interrupts and the
15564                  * remaining vectors handle rx and tx interrupts.  Reuse the
15565                  * mailbox values for the next iteration.  The values we set up
15566                  * above are still useful for the single-vectored mode.
15567                  */
15568                 if (!i)
15569                         continue;
15570
15571                 rcvmbx += 0x8;
15572
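                      /* The -0x4/+0xc walk below alternates between the two
                       * 32-bit halves of consecutive 8-byte producer mailbox
                       * registers (a reading of the arithmetic, not a
                       * documented layout).
                       */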
15573                 if (sndmbx & 0x4)
15574                         sndmbx -= 0x4;
15575                 else
15576                         sndmbx += 0xc;
15577         }
15578
15579         tg3_init_coal(tp);
15580
15581         pci_set_drvdata(pdev, dev);
15582
15583         if (tg3_flag(tp, 5717_PLUS)) {
15584                 /* Resume from a low-power mode */
15585                 tg3_frob_aux_power(tp, false);
15586         }
15587
15588         err = register_netdev(dev);
15589         if (err) {
15590                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15591                 goto err_out_apeunmap;
15592         }
15593
15594         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15595                     tp->board_part_number,
15596                     tp->pci_chip_rev_id,
15597                     tg3_bus_string(tp, str),
15598                     dev->dev_addr);
15599
15600         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15601                 struct phy_device *phydev;
15602                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15603                 netdev_info(dev,
15604                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15605                             phydev->drv->name, dev_name(&phydev->dev));
15606         } else {
15607                 char *ethtype;
15608
15609                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15610                         ethtype = "10/100Base-TX";
15611                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15612                         ethtype = "1000Base-SX";
15613                 else
15614                         ethtype = "10/100/1000Base-T";
15615
15616                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15617                             "(WireSpeed[%d], EEE[%d])\n",
15618                             tg3_phy_string(tp), ethtype,
15619                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15620                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15621         }
15622
15623         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15624                     (dev->features & NETIF_F_RXCSUM) != 0,
15625                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15626                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15627                     tg3_flag(tp, ENABLE_ASF) != 0,
15628                     tg3_flag(tp, TSO_CAPABLE) != 0);
15629         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15630                     tp->dma_rwctrl,
15631                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15632                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15633
15634         pci_save_state(pdev);
15635
15636         return 0;
15637
15638 err_out_apeunmap:
15639         if (tp->aperegs) {
15640                 iounmap(tp->aperegs);
15641                 tp->aperegs = NULL;
15642         }
15643
15644 err_out_iounmap:
15645         if (tp->regs) {
15646                 iounmap(tp->regs);
15647                 tp->regs = NULL;
15648         }
15649
15650 err_out_free_dev:
15651         free_netdev(dev);
15652
15653 err_out_power_down:
15654         pci_set_power_state(pdev, PCI_D3hot);
15655
15656 err_out_free_res:
15657         pci_release_regions(pdev);
15658
15659 err_out_disable_pdev:
15660         pci_disable_device(pdev);
15661         pci_set_drvdata(pdev, NULL);
15662         return err;
15663 }
15664
15665 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15666 {
15667         struct net_device *dev = pci_get_drvdata(pdev);
15668
15669         if (dev) {
15670                 struct tg3 *tp = netdev_priv(dev);
15671
15672                 if (tp->fw)
15673                         release_firmware(tp->fw);
15674
15675                 cancel_work_sync(&tp->reset_task);
15676
15677                 if (!tg3_flag(tp, USE_PHYLIB)) {
15678                         tg3_phy_fini(tp);
15679                         tg3_mdio_fini(tp);
15680                 }
15681
15682                 unregister_netdev(dev);
15683                 if (tp->aperegs) {
15684                         iounmap(tp->aperegs);
15685                         tp->aperegs = NULL;
15686                 }
15687                 if (tp->regs) {
15688                         iounmap(tp->regs);
15689                         tp->regs = NULL;
15690                 }
15691                 free_netdev(dev);
15692                 pci_release_regions(pdev);
15693                 pci_disable_device(pdev);
15694                 pci_set_drvdata(pdev, NULL);
15695         }
15696 }
15697
15698 #ifdef CONFIG_PM_SLEEP
15699 static int tg3_suspend(struct device *device)
15700 {
15701         struct pci_dev *pdev = to_pci_dev(device);
15702         struct net_device *dev = pci_get_drvdata(pdev);
15703         struct tg3 *tp = netdev_priv(dev);
15704         int err;
15705
15706         if (!netif_running(dev))
15707                 return 0;
15708
15709         flush_work_sync(&tp->reset_task);
15710         tg3_phy_stop(tp);
15711         tg3_netif_stop(tp);
15712
15713         del_timer_sync(&tp->timer);
15714
15715         tg3_full_lock(tp, 1);
15716         tg3_disable_ints(tp);
15717         tg3_full_unlock(tp);
15718
15719         netif_device_detach(dev);
15720
15721         tg3_full_lock(tp, 0);
15722         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15723         tg3_flag_clear(tp, INIT_COMPLETE);
15724         tg3_full_unlock(tp);
15725
15726         err = tg3_power_down_prepare(tp);
15727         if (err) {
15728                 int err2;
15729
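                      /* Power-down preparation failed: bring the hardware
                       * back up so the device is left usable rather than
                       * half-suspended.
                       */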
15730                 tg3_full_lock(tp, 0);
15731
15732                 tg3_flag_set(tp, INIT_COMPLETE);
15733                 err2 = tg3_restart_hw(tp, 1);
15734                 if (err2)
15735                         goto out;
15736
15737                 tp->timer.expires = jiffies + tp->timer_offset;
15738                 add_timer(&tp->timer);
15739
15740                 netif_device_attach(dev);
15741                 tg3_netif_start(tp);
15742
15743 out:
15744                 tg3_full_unlock(tp);
15745
15746                 if (!err2)
15747                         tg3_phy_start(tp);
15748         }
15749
15750         return err;
15751 }
15752
15753 static int tg3_resume(struct device *device)
15754 {
15755         struct pci_dev *pdev = to_pci_dev(device);
15756         struct net_device *dev = pci_get_drvdata(pdev);
15757         struct tg3 *tp = netdev_priv(dev);
15758         int err;
15759
15760         if (!netif_running(dev))
15761                 return 0;
15762
15763         netif_device_attach(dev);
15764
15765         tg3_full_lock(tp, 0);
15766
15767         tg3_flag_set(tp, INIT_COMPLETE);
15768         err = tg3_restart_hw(tp, 1);
15769         if (err)
15770                 goto out;
15771
15772         tp->timer.expires = jiffies + tp->timer_offset;
15773         add_timer(&tp->timer);
15774
15775         tg3_netif_start(tp);
15776
15777 out:
15778         tg3_full_unlock(tp);
15779
15780         if (!err)
15781                 tg3_phy_start(tp);
15782
15783         return err;
15784 }
15785
15786 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15787 #define TG3_PM_OPS (&tg3_pm_ops)
15788
15789 #else
15790
15791 #define TG3_PM_OPS NULL
15792
15793 #endif /* CONFIG_PM_SLEEP */
15794
15795 /**
15796  * tg3_io_error_detected - called when PCI error is detected
15797  * @pdev: Pointer to PCI device
15798  * @state: The current pci connection state
15799  *
15800  * This function is called after a PCI bus error affecting
15801  * this device has been detected.
15802  */
15803 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15804                                               pci_channel_state_t state)
15805 {
15806         struct net_device *netdev = pci_get_drvdata(pdev);
15807         struct tg3 *tp = netdev_priv(netdev);
15808         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15809
15810         netdev_info(netdev, "PCI I/O error detected\n");
15811
15812         rtnl_lock();
15813
15814         if (!netif_running(netdev))
15815                 goto done;
15816
15817         tg3_phy_stop(tp);
15818
15819         tg3_netif_stop(tp);
15820
15821         del_timer_sync(&tp->timer);
15822         tg3_flag_clear(tp, RESTART_TIMER);
15823
15824         /* Want to make sure that the reset task doesn't run */
15825         cancel_work_sync(&tp->reset_task);
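              /* Clear any recovery state left behind now that the reset
               * task can no longer run.
               */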
15826         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15827         tg3_flag_clear(tp, RESTART_TIMER);
15828
15829         netif_device_detach(netdev);
15830
15831         /* Clean up software state, even if MMIO is blocked */
15832         tg3_full_lock(tp, 0);
15833         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15834         tg3_full_unlock(tp);
15835
15836 done:
15837         if (state == pci_channel_io_perm_failure)
15838                 err = PCI_ERS_RESULT_DISCONNECT;
15839         else
15840                 pci_disable_device(pdev);
15841
15842         rtnl_unlock();
15843
15844         return err;
15845 }
15846
15847 /**
15848  * tg3_io_slot_reset - called after the PCI bus has been reset.
15849  * @pdev: Pointer to PCI device
15850  *
15851  * Restart the card from scratch, as if from a cold boot.
15852  * At this point, the card has experienced a hard reset,
15853  * followed by fixups by the BIOS, and has its config space
15854  * set up identically to what it was at cold boot.
15855  */
15856 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15857 {
15858         struct net_device *netdev = pci_get_drvdata(pdev);
15859         struct tg3 *tp = netdev_priv(netdev);
15860         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15861         int err;
15862
15863         rtnl_lock();
15864
15865         if (pci_enable_device(pdev)) {
15866                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15867                 goto done;
15868         }
15869
15870         pci_set_master(pdev);
15871         pci_restore_state(pdev);
15872         pci_save_state(pdev);
15873
15874         if (!netif_running(netdev)) {
15875                 rc = PCI_ERS_RESULT_RECOVERED;
15876                 goto done;
15877         }
15878
15879         err = tg3_power_up(tp);
15880         if (err)
15881                 goto done;
15882
15883         rc = PCI_ERS_RESULT_RECOVERED;
15884
15885 done:
15886         rtnl_unlock();
15887
15888         return rc;
15889 }
15890
15891 /**
15892  * tg3_io_resume - called when traffic can start flowing again.
15893  * @pdev: Pointer to PCI device
15894  *
15895  * This callback is called when the error recovery driver tells
15896  * us that it's OK to resume normal operation.
15897  */
15898 static void tg3_io_resume(struct pci_dev *pdev)
15899 {
15900         struct net_device *netdev = pci_get_drvdata(pdev);
15901         struct tg3 *tp = netdev_priv(netdev);
15902         int err;
15903
15904         rtnl_lock();
15905
15906         if (!netif_running(netdev))
15907                 goto done;
15908
15909         tg3_full_lock(tp, 0);
15910         tg3_flag_set(tp, INIT_COMPLETE);
15911         err = tg3_restart_hw(tp, 1);
15912         tg3_full_unlock(tp);
15913         if (err) {
15914                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15915                 goto done;
15916         }
15917
15918         netif_device_attach(netdev);
15919
15920         tp->timer.expires = jiffies + tp->timer_offset;
15921         add_timer(&tp->timer);
15922
15923         tg3_netif_start(tp);
15924
15925         tg3_phy_start(tp);
15926
15927 done:
15928         rtnl_unlock();
15929 }
15930
15931 static struct pci_error_handlers tg3_err_handler = {
15932         .error_detected = tg3_io_error_detected,
15933         .slot_reset     = tg3_io_slot_reset,
15934         .resume         = tg3_io_resume
15935 };
15936
15937 static struct pci_driver tg3_driver = {
15938         .name           = DRV_MODULE_NAME,
15939         .id_table       = tg3_pci_tbl,
15940         .probe          = tg3_init_one,
15941         .remove         = __devexit_p(tg3_remove_one),
15942         .err_handler    = &tg3_err_handler,
15943         .driver.pm      = TG3_PM_OPS,
15944 };
15945
15946 static int __init tg3_init(void)
15947 {
15948         return pci_register_driver(&tg3_driver);
15949 }
15950
15951 static void __exit tg3_cleanup(void)
15952 {
15953         pci_unregister_driver(&tg3_driver);
15954 }
15955
15956 module_init(tg3_init);
15957 module_exit(tg3_cleanup);