/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
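
/* Example (illustrative only): tg3_flag(tp, ENABLE_APE) expands via
 * token pasting to _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags),
 * i.e. an atomic test_bit() on the flag bitmap, so every flag test is
 * checked against enum TG3_FLAGS at compile time.
 */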

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     121
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "November 2, 2011"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
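
/* Worked example: TG3_TX_RING_SIZE is 512, a power of two, so
 * NEXT_TX(511) == (512 & 511) == 0.  The producer index wraps with a
 * single AND, which is exactly the '% foo' -> '& (foo - 1)'
 * replacement the comment above describes.
 */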

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
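
/* Illustrative sketch only (the real logic lives in the rx fast path
 * later in this file): frames no longer than TG3_RX_COPY_THRESH(tp)
 * are copied into a fresh skb so the original mapped buffer can be
 * recycled without an unmap/remap cycle, roughly:
 *
 *	if (len > TG3_RX_COPY_THRESH(tp))
 *		... pass the mapped buffer up and replenish the ring ...
 *	else
 *		... memcpy into a small skb and reuse the buffer ...
 */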

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       0
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX               4096
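
/* Example: with the default TG3_DEF_TX_RING_PENDING of 511,
 * TG3_TX_WAKEUP_THRESH() evaluates to 511 / 4 = 127, so the netdev
 * queue is only woken once 127 descriptors are free, batching wakeups
 * instead of toggling the queue on every completed frame.
 */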

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
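
/* The two indirect helpers above implement a classic PCI window
 * access: the target register offset is written to
 * TG3PCI_REG_BASE_ADDR in config space and the data then moves
 * through TG3PCI_REG_DATA, all under indirect_lock so concurrent
 * users cannot interleave their window setups.
 */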

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
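
/* Usage note: tw32_f() is a flushed write (write, then read back),
 * while tw32_wait_f() additionally enforces a settle time as
 * described above _tw32_flush(), e.g. tg3_switch_clocks() below uses
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40) to guarantee at
 * least 40 usec elapse around each clock change.
 */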

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
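
/* Hedged caller sketch (mirrors real uses later in this file):
 * tg3_readphy() returns 0 on success, so link state could be sampled
 * as
 *
 *	u32 bmsr;
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr))
 *		link_up = bmsr & BMSR_LSTATUS;
 */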

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
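
/* The two helpers above tunnel clause-45 MMD accesses through the
 * PHY's clause-22 registers: select the MMD device in
 * MII_TG3_MMD_CTRL, latch the register in MII_TG3_MMD_ADDRESS, then
 * switch to no-increment data mode and move the value.  A hedged
 * usage sketch (constants assumed from <linux/mdio.h>):
 *
 *	tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, advert);
 */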

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);
1368
1369         i = mdiobus_register(tp->mdio_bus);
1370         if (i) {
1371                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1372                 mdiobus_free(tp->mdio_bus);
1373                 return i;
1374         }
1375
1376         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1377
1378         if (!phydev || !phydev->drv) {
1379                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1380                 mdiobus_unregister(tp->mdio_bus);
1381                 mdiobus_free(tp->mdio_bus);
1382                 return -ENODEV;
1383         }
1384
1385         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1386         case PHY_ID_BCM57780:
1387                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1388                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1389                 break;
1390         case PHY_ID_BCM50610:
1391         case PHY_ID_BCM50610M:
1392                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1393                                      PHY_BRCM_RX_REFCLK_UNUSED |
1394                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1395                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1396                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1397                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1398                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1399                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1400                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1401                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1402                 /* fallthru */
1403         case PHY_ID_RTL8211C:
1404                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1405                 break;
1406         case PHY_ID_RTL8201E:
1407         case PHY_ID_BCMAC131:
1408                 phydev->interface = PHY_INTERFACE_MODE_MII;
1409                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1410                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1411                 break;
1412         }
1413
1414         tg3_flag_set(tp, MDIOBUS_INITED);
1415
1416         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1417                 tg3_mdio_config_5785(tp);
1418
1419         return 0;
1420 }
1421
1422 static void tg3_mdio_fini(struct tg3 *tp)
1423 {
1424         if (tg3_flag(tp, MDIOBUS_INITED)) {
1425                 tg3_flag_clear(tp, MDIOBUS_INITED);
1426                 mdiobus_unregister(tp->mdio_bus);
1427                 mdiobus_free(tp->mdio_bus);
1428         }
1429 }
1430
1431 /* tp->lock is held. */
1432 static inline void tg3_generate_fw_event(struct tg3 *tp)
1433 {
1434         u32 val;
1435
1436         val = tr32(GRC_RX_CPU_EVENT);
1437         val |= GRC_RX_CPU_DRIVER_EVENT;
1438         tw32_f(GRC_RX_CPU_EVENT, val);
1439
1440         tp->last_event_jiffies = jiffies;
1441 }
1442
1443 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1444
1445 /* tp->lock is held. */
1446 static void tg3_wait_for_event_ack(struct tg3 *tp)
1447 {
1448         int i;
1449         unsigned int delay_cnt;
1450         long time_remain;
1451
1452         /* If enough time has passed, no wait is necessary. */
1453         time_remain = (long)(tp->last_event_jiffies + 1 +
1454                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1455                       (long)jiffies;
1456         if (time_remain < 0)
1457                 return;
1458
1459         /* Check if we can shorten the wait time. */
1460         delay_cnt = jiffies_to_usecs(time_remain);
1461         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1462                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1463         delay_cnt = (delay_cnt >> 3) + 1;
1464
1465         for (i = 0; i < delay_cnt; i++) {
1466                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1467                         break;
1468                 udelay(8);
1469         }
1470 }
1471
1472 /* tp->lock is held. */
1473 static void tg3_ump_link_report(struct tg3 *tp)
1474 {
1475         u32 reg;
1476         u32 val;
1477
1478         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1479                 return;
1480
1481         tg3_wait_for_event_ack(tp);
1482
1483         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1484
1485         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1486
1487         val = 0;
1488         if (!tg3_readphy(tp, MII_BMCR, &reg))
1489                 val = reg << 16;
1490         if (!tg3_readphy(tp, MII_BMSR, &reg))
1491                 val |= (reg & 0xffff);
1492         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1493
1494         val = 0;
1495         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1496                 val = reg << 16;
1497         if (!tg3_readphy(tp, MII_LPA, &reg))
1498                 val |= (reg & 0xffff);
1499         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1500
1501         val = 0;
1502         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1503                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1504                         val = reg << 16;
1505                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1506                         val |= (reg & 0xffff);
1507         }
1508         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1509
1510         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1511                 val = reg << 16;
1512         else
1513                 val = 0;
1514         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1515
1516         tg3_generate_fw_event(tp);
1517 }
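
/* Layout of the link-update payload written above: the command length is
 * 14 bytes because the last word carries data only in its upper half.
 *
 *	DATA_MBOX + 0:  BMCR << 16      | BMSR
 *	DATA_MBOX + 4:  ADVERTISE << 16 | LPA
 *	DATA_MBOX + 8:  CTRL1000 << 16  | STAT1000   (zero for MII serdes)
 *	DATA_MBOX + 12: PHYADDR << 16                (upper 16 bits only)
 */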
1518
1519 /* tp->lock is held. */
1520 static void tg3_stop_fw(struct tg3 *tp)
1521 {
1522         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1523                 /* Wait for RX cpu to ACK the previous event. */
1524                 tg3_wait_for_event_ack(tp);
1525
1526                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1527
1528                 tg3_generate_fw_event(tp);
1529
1530                 /* Wait for RX cpu to ACK this event. */
1531                 tg3_wait_for_event_ack(tp);
1532         }
1533 }
1534
1535 /* tp->lock is held. */
1536 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1537 {
1538         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1539                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1540
1541         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1542                 switch (kind) {
1543                 case RESET_KIND_INIT:
1544                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1545                                       DRV_STATE_START);
1546                         break;
1547
1548                 case RESET_KIND_SHUTDOWN:
1549                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1550                                       DRV_STATE_UNLOAD);
1551                         break;
1552
1553                 case RESET_KIND_SUSPEND:
1554                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1555                                       DRV_STATE_SUSPEND);
1556                         break;
1557
1558                 default:
1559                         break;
1560                 }
1561         }
1562
1563         if (kind == RESET_KIND_INIT ||
1564             kind == RESET_KIND_SUSPEND)
1565                 tg3_ape_driver_state_change(tp, kind);
1566 }
1567
1568 /* tp->lock is held. */
1569 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1570 {
1571         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1572                 switch (kind) {
1573                 case RESET_KIND_INIT:
1574                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1575                                       DRV_STATE_START_DONE);
1576                         break;
1577
1578                 case RESET_KIND_SHUTDOWN:
1579                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1580                                       DRV_STATE_UNLOAD_DONE);
1581                         break;
1582
1583                 default:
1584                         break;
1585                 }
1586         }
1587
1588         if (kind == RESET_KIND_SHUTDOWN)
1589                 tg3_ape_driver_state_change(tp, kind);
1590 }
1591
1592 /* tp->lock is held. */
1593 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1594 {
1595         if (tg3_flag(tp, ENABLE_ASF)) {
1596                 switch (kind) {
1597                 case RESET_KIND_INIT:
1598                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1599                                       DRV_STATE_START);
1600                         break;
1601
1602                 case RESET_KIND_SHUTDOWN:
1603                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1604                                       DRV_STATE_UNLOAD);
1605                         break;
1606
1607                 case RESET_KIND_SUSPEND:
1608                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1609                                       DRV_STATE_SUSPEND);
1610                         break;
1611
1612                 default:
1613                         break;
1614                 }
1615         }
1616 }
1617
1618 static int tg3_poll_fw(struct tg3 *tp)
1619 {
1620         int i;
1621         u32 val;
1622
1623         if (tg3_flag(tp, NO_FWARE_REPORTED))
1624                 return 0;
1625
1626         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1627                 /* Wait up to 20ms for init done. */
1628                 for (i = 0; i < 200; i++) {
1629                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1630                                 return 0;
1631                         udelay(100);
1632                 }
1633                 return -ENODEV;
1634         }
1635
1636         /* Wait for firmware initialization to complete. */
1637         for (i = 0; i < 100000; i++) {
1638                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1639                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1640                         break;
1641                 udelay(10);
1642         }
1643
1644         /* Chip might not be fitted with firmware.  Some Sun onboard
1645          * parts are configured like that.  So don't signal the timeout
1646          * of the above loop as an error, but do report the lack of
1647          * running firmware once.
1648          */
1649         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1650                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1651
1652                 netdev_info(tp->dev, "No firmware running\n");
1653         }
1654
1655         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1656                 /* The 57765 A0 needs a little more
1657                  * time to do some important work.
1658                  */
1659                 mdelay(10);
1660         }
1661
1662         return 0;
1663 }
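
/* The mailbox handshake polled above works against the signature that
 * tg3_write_sig_pre_reset() deposits: the driver writes MAGIC1 before the
 * reset and the boot code answers with its one's complement (~MAGIC1) once
 * initialization completes.  The poll loop allows roughly one second
 * (100000 iterations * 10 usec) before deciding no firmware is running.
 */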
1664
1665 static void tg3_link_report(struct tg3 *tp)
1666 {
1667         if (!netif_carrier_ok(tp->dev)) {
1668                 netif_info(tp, link, tp->dev, "Link is down\n");
1669                 tg3_ump_link_report(tp);
1670         } else if (netif_msg_link(tp)) {
1671                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1672                             (tp->link_config.active_speed == SPEED_1000 ?
1673                              1000 :
1674                              (tp->link_config.active_speed == SPEED_100 ?
1675                               100 : 10)),
1676                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1677                              "full" : "half"));
1678
1679                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1680                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1681                             "on" : "off",
1682                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1683                             "on" : "off");
1684
1685                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1686                         netdev_info(tp->dev, "EEE is %s\n",
1687                                     tp->setlpicnt ? "enabled" : "disabled");
1688
1689                 tg3_ump_link_report(tp);
1690         }
1691 }
1692
1693 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1694 {
1695         u16 miireg;
1696
1697         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1698                 miireg = ADVERTISE_PAUSE_CAP;
1699         else if (flow_ctrl & FLOW_CTRL_TX)
1700                 miireg = ADVERTISE_PAUSE_ASYM;
1701         else if (flow_ctrl & FLOW_CTRL_RX)
1702                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1703         else
1704                 miireg = 0;
1705
1706         return miireg;
1707 }
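
/* The function above encodes the IEEE 802.3 Annex 28B pause advertisement
 * bits; tg3_advert_flowctrl_1000X() below is the 1000BASE-X mirror of the
 * same table:
 *
 *	wanted TX  wanted RX	PAUSE_CAP  PAUSE_ASYM
 *	    0          0            0          0
 *	    1          0            0          1
 *	    0          1            1          1
 *	    1          1            1          0
 */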
1708
1709 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1710 {
1711         u16 miireg;
1712
1713         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1714                 miireg = ADVERTISE_1000XPAUSE;
1715         else if (flow_ctrl & FLOW_CTRL_TX)
1716                 miireg = ADVERTISE_1000XPSE_ASYM;
1717         else if (flow_ctrl & FLOW_CTRL_RX)
1718                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1719         else
1720                 miireg = 0;
1721
1722         return miireg;
1723 }
1724
1725 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1726 {
1727         u8 cap = 0;
1728
1729         if (lcladv & ADVERTISE_1000XPAUSE) {
1730                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1731                         if (rmtadv & LPA_1000XPAUSE)
1732                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1733                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1734                                 cap = FLOW_CTRL_RX;
1735                 } else {
1736                         if (rmtadv & LPA_1000XPAUSE)
1737                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1738                 }
1739         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1740                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1741                         cap = FLOW_CTRL_TX;
1742         }
1743
1744         return cap;
1745 }
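
/* Resolution rules implemented above, per IEEE 802.3 Annex 28B: when both
 * PAUSE and ASYM_PAUSE are advertised locally, a partner advertising PAUSE
 * gives symmetric flow control while a partner advertising only ASYM_PAUSE
 * gives RX-only; a local PAUSE-only advertisement needs the partner's PAUSE
 * bit for symmetric operation; a local ASYM_PAUSE-only advertisement yields
 * TX-only flow control iff the partner advertises both bits.
 */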
1746
1747 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1748 {
1749         u8 autoneg;
1750         u8 flowctrl = 0;
1751         u32 old_rx_mode = tp->rx_mode;
1752         u32 old_tx_mode = tp->tx_mode;
1753
1754         if (tg3_flag(tp, USE_PHYLIB))
1755                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1756         else
1757                 autoneg = tp->link_config.autoneg;
1758
1759         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1760                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1761                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1762                 else
1763                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1764         } else
1765                 flowctrl = tp->link_config.flowctrl;
1766
1767         tp->link_config.active_flowctrl = flowctrl;
1768
1769         if (flowctrl & FLOW_CTRL_RX)
1770                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1771         else
1772                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1773
1774         if (old_rx_mode != tp->rx_mode)
1775                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1776
1777         if (flowctrl & FLOW_CTRL_TX)
1778                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1779         else
1780                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1781
1782         if (old_tx_mode != tp->tx_mode)
1783                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1784 }
1785
1786 static void tg3_adjust_link(struct net_device *dev)
1787 {
1788         u8 oldflowctrl, linkmesg = 0;
1789         u32 mac_mode, lcl_adv, rmt_adv;
1790         struct tg3 *tp = netdev_priv(dev);
1791         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1792
1793         spin_lock_bh(&tp->lock);
1794
1795         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1796                                     MAC_MODE_HALF_DUPLEX);
1797
1798         oldflowctrl = tp->link_config.active_flowctrl;
1799
1800         if (phydev->link) {
1801                 lcl_adv = 0;
1802                 rmt_adv = 0;
1803
1804                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1805                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1806                 else if (phydev->speed == SPEED_1000 ||
1807                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1808                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1809                 else
1810                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1811
1812                 if (phydev->duplex == DUPLEX_HALF)
1813                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1814                 else {
1815                         lcl_adv = tg3_advert_flowctrl_1000T(
1816                                   tp->link_config.flowctrl);
1817
1818                         if (phydev->pause)
1819                                 rmt_adv = LPA_PAUSE_CAP;
1820                         if (phydev->asym_pause)
1821                                 rmt_adv |= LPA_PAUSE_ASYM;
1822                 }
1823
1824                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1825         } else
1826                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1827
1828         if (mac_mode != tp->mac_mode) {
1829                 tp->mac_mode = mac_mode;
1830                 tw32_f(MAC_MODE, tp->mac_mode);
1831                 udelay(40);
1832         }
1833
1834         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1835                 if (phydev->speed == SPEED_10)
1836                         tw32(MAC_MI_STAT,
1837                              MAC_MI_STAT_10MBPS_MODE |
1838                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1839                 else
1840                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1841         }
1842
1843         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1844                 tw32(MAC_TX_LENGTHS,
1845                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1846                       (6 << TX_LENGTHS_IPG_SHIFT) |
1847                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1848         else
1849                 tw32(MAC_TX_LENGTHS,
1850                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1851                       (6 << TX_LENGTHS_IPG_SHIFT) |
1852                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1853
1854         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1855             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1856             phydev->speed != tp->link_config.active_speed ||
1857             phydev->duplex != tp->link_config.active_duplex ||
1858             oldflowctrl != tp->link_config.active_flowctrl)
1859                 linkmesg = 1;
1860
1861         tp->link_config.active_speed = phydev->speed;
1862         tp->link_config.active_duplex = phydev->duplex;
1863
1864         spin_unlock_bh(&tp->lock);
1865
1866         if (linkmesg)
1867                 tg3_link_report(tp);
1868 }
1869
1870 static int tg3_phy_init(struct tg3 *tp)
1871 {
1872         struct phy_device *phydev;
1873
1874         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1875                 return 0;
1876
1877         /* Bring the PHY back to a known state. */
1878         tg3_bmcr_reset(tp);
1879
1880         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1881
1882         /* Attach the MAC to the PHY. */
1883         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1884                              phydev->dev_flags, phydev->interface);
1885         if (IS_ERR(phydev)) {
1886                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1887                 return PTR_ERR(phydev);
1888         }
1889
1890         /* Mask with MAC supported features. */
1891         switch (phydev->interface) {
1892         case PHY_INTERFACE_MODE_GMII:
1893         case PHY_INTERFACE_MODE_RGMII:
1894                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1895                         phydev->supported &= (PHY_GBIT_FEATURES |
1896                                               SUPPORTED_Pause |
1897                                               SUPPORTED_Asym_Pause);
1898                         break;
1899                 }
1900                 /* fallthru */
1901         case PHY_INTERFACE_MODE_MII:
1902                 phydev->supported &= (PHY_BASIC_FEATURES |
1903                                       SUPPORTED_Pause |
1904                                       SUPPORTED_Asym_Pause);
1905                 break;
1906         default:
1907                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1908                 return -EINVAL;
1909         }
1910
1911         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1912
1913         phydev->advertising = phydev->supported;
1914
1915         return 0;
1916 }
1917
1918 static void tg3_phy_start(struct tg3 *tp)
1919 {
1920         struct phy_device *phydev;
1921
1922         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1923                 return;
1924
1925         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1926
1927         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1928                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1929                 phydev->speed = tp->link_config.orig_speed;
1930                 phydev->duplex = tp->link_config.orig_duplex;
1931                 phydev->autoneg = tp->link_config.orig_autoneg;
1932                 phydev->advertising = tp->link_config.orig_advertising;
1933         }
1934
1935         phy_start(phydev);
1936
1937         phy_start_aneg(phydev);
1938 }
1939
1940 static void tg3_phy_stop(struct tg3 *tp)
1941 {
1942         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1943                 return;
1944
1945         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1946 }
1947
1948 static void tg3_phy_fini(struct tg3 *tp)
1949 {
1950         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1951                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1952                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1953         }
1954 }
1955
1956 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1957 {
1958         int err;
1959         u32 val;
1960
1961         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1962                 return 0;
1963
1964         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1965                 /* Cannot do read-modify-write on 5401 */
1966                 err = tg3_phy_auxctl_write(tp,
1967                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1968                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1969                                            0x4c20);
1970                 goto done;
1971         }
1972
1973         err = tg3_phy_auxctl_read(tp,
1974                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1975         if (err)
1976                 return err;
1977
1978         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1979         err = tg3_phy_auxctl_write(tp,
1980                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1981
1982 done:
1983         return err;
1984 }
1985
1986 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1987 {
1988         u32 phytest;
1989
1990         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1991                 u32 phy;
1992
1993                 tg3_writephy(tp, MII_TG3_FET_TEST,
1994                              phytest | MII_TG3_FET_SHADOW_EN);
1995                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1996                         if (enable)
1997                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1998                         else
1999                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2000                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2001                 }
2002                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2003         }
2004 }
2005
2006 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2007 {
2008         u32 reg;
2009
2010         if (!tg3_flag(tp, 5705_PLUS) ||
2011             (tg3_flag(tp, 5717_PLUS) &&
2012              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2013                 return;
2014
2015         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2016                 tg3_phy_fet_toggle_apd(tp, enable);
2017                 return;
2018         }
2019
2020         reg = MII_TG3_MISC_SHDW_WREN |
2021               MII_TG3_MISC_SHDW_SCR5_SEL |
2022               MII_TG3_MISC_SHDW_SCR5_LPED |
2023               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2024               MII_TG3_MISC_SHDW_SCR5_SDTL |
2025               MII_TG3_MISC_SHDW_SCR5_C125OE;
2026         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2027                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2028
2029         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2030
2032         reg = MII_TG3_MISC_SHDW_WREN |
2033               MII_TG3_MISC_SHDW_APD_SEL |
2034               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2035         if (enable)
2036                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2037
2038         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2039 }
2040
2041 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2042 {
2043         u32 phy;
2044
2045         if (!tg3_flag(tp, 5705_PLUS) ||
2046             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2047                 return;
2048
2049         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2050                 u32 ephy;
2051
2052                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2053                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2054
2055                         tg3_writephy(tp, MII_TG3_FET_TEST,
2056                                      ephy | MII_TG3_FET_SHADOW_EN);
2057                         if (!tg3_readphy(tp, reg, &phy)) {
2058                                 if (enable)
2059                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2060                                 else
2061                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2062                                 tg3_writephy(tp, reg, phy);
2063                         }
2064                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2065                 }
2066         } else {
2067                 int ret;
2068
2069                 ret = tg3_phy_auxctl_read(tp,
2070                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2071                 if (!ret) {
2072                         if (enable)
2073                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2074                         else
2075                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2076                         tg3_phy_auxctl_write(tp,
2077                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2078                 }
2079         }
2080 }
2081
2082 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2083 {
2084         int ret;
2085         u32 val;
2086
2087         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2088                 return;
2089
2090         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2091         if (!ret)
2092                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2093                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2094 }
2095
2096 static void tg3_phy_apply_otp(struct tg3 *tp)
2097 {
2098         u32 otp, phy;
2099
2100         if (!tp->phy_otp)
2101                 return;
2102
2103         otp = tp->phy_otp;
2104
2105         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2106                 return;
2107
2108         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2109         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2110         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2111
2112         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2113               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2114         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2115
2116         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2117         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2118         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2119
2120         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2121         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2122
2123         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2124         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2125
2126         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2127               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2128         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2129
2130         tg3_phy_toggle_auxctl_smdsp(tp, false);
2131 }
2132
2133 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2134 {
2135         u32 val;
2136
2137         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2138                 return;
2139
2140         tp->setlpicnt = 0;
2141
2142         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2143             current_link_up == 1 &&
2144             tp->link_config.active_duplex == DUPLEX_FULL &&
2145             (tp->link_config.active_speed == SPEED_100 ||
2146              tp->link_config.active_speed == SPEED_1000)) {
2147                 u32 eeectl;
2148
2149                 if (tp->link_config.active_speed == SPEED_1000)
2150                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2151                 else
2152                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2153
2154                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2155
2156                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2157                                   TG3_CL45_D7_EEERES_STAT, &val);
2158
2159                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2160                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2161                         tp->setlpicnt = 2;
2162         }
2163
2164         if (!tp->setlpicnt) {
2165                 if (current_link_up == 1 &&
2166                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2167                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2168                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2169                 }
2170
2171                 val = tr32(TG3_CPMU_EEE_MODE);
2172                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2173         }
2174 }
2175
2176 static void tg3_phy_eee_enable(struct tg3 *tp)
2177 {
2178         u32 val;
2179
2180         if (tp->link_config.active_speed == SPEED_1000 &&
2181             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2182              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2183              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2184             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2185                 val = MII_TG3_DSP_TAP26_ALNOKO |
2186                       MII_TG3_DSP_TAP26_RMRXSTO;
2187                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2188                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2189         }
2190
2191         val = tr32(TG3_CPMU_EEE_MODE);
2192         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2193 }
2194
2195 static int tg3_wait_macro_done(struct tg3 *tp)
2196 {
2197         int limit = 100;
2198
2199         while (limit--) {
2200                 u32 tmp32;
2201
2202                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2203                         if ((tmp32 & 0x1000) == 0)
2204                                 break;
2205                 }
2206         }
2207         if (limit < 0)
2208                 return -EBUSY;
2209
2210         return 0;
2211 }
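
/* Note on the loop above: the post-decrement leaves limit at -1 only when
 * every poll was consumed without bit 0x1000 clearing, so the limit < 0
 * test is the timeout condition rather than an off-by-one.
 */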
2212
2213 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2214 {
2215         static const u32 test_pat[4][6] = {
2216         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2217         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2218         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2219         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2220         };
2221         int chan;
2222
2223         for (chan = 0; chan < 4; chan++) {
2224                 int i;
2225
2226                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2227                              (chan * 0x2000) | 0x0200);
2228                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2229
2230                 for (i = 0; i < 6; i++)
2231                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2232                                      test_pat[chan][i]);
2233
2234                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2235                 if (tg3_wait_macro_done(tp)) {
2236                         *resetp = 1;
2237                         return -EBUSY;
2238                 }
2239
2240                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2241                              (chan * 0x2000) | 0x0200);
2242                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2243                 if (tg3_wait_macro_done(tp)) {
2244                         *resetp = 1;
2245                         return -EBUSY;
2246                 }
2247
2248                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2249                 if (tg3_wait_macro_done(tp)) {
2250                         *resetp = 1;
2251                         return -EBUSY;
2252                 }
2253
2254                 for (i = 0; i < 6; i += 2) {
2255                         u32 low, high;
2256
2257                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2258                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2259                             tg3_wait_macro_done(tp)) {
2260                                 *resetp = 1;
2261                                 return -EBUSY;
2262                         }
2263                         low &= 0x7fff;
2264                         high &= 0x000f;
2265                         if (low != test_pat[chan][i] ||
2266                             high != test_pat[chan][i+1]) {
2267                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2268                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2269                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2270
2271                                 return -EBUSY;
2272                         }
2273                 }
2274         }
2275
2276         return 0;
2277 }
2278
2279 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2280 {
2281         int chan;
2282
2283         for (chan = 0; chan < 4; chan++) {
2284                 int i;
2285
2286                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2287                              (chan * 0x2000) | 0x0200);
2288                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2289                 for (i = 0; i < 6; i++)
2290                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2291                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2292                 if (tg3_wait_macro_done(tp))
2293                         return -EBUSY;
2294         }
2295
2296         return 0;
2297 }
2298
2299 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2300 {
2301         u32 reg32, phy9_orig;
2302         int retries, do_phy_reset, err;
2303
2304         retries = 10;
2305         do_phy_reset = 1;
2306         do {
2307                 if (do_phy_reset) {
2308                         err = tg3_bmcr_reset(tp);
2309                         if (err)
2310                                 return err;
2311                         do_phy_reset = 0;
2312                 }
2313
2314                 /* Disable transmitter and interrupt.  */
2315                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2316                         continue;
2317
2318                 reg32 |= 0x3000;
2319                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2320
2321                 /* Set full-duplex, 1000 mbps.  */
2322                 tg3_writephy(tp, MII_BMCR,
2323                              BMCR_FULLDPLX | BMCR_SPEED1000);
2324
2325                 /* Set to master mode.  */
2326                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2327                         continue;
2328
2329                 tg3_writephy(tp, MII_CTRL1000,
2330                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2331
2332                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2333                 if (err)
2334                         return err;
2335
2336                 /* Block the PHY control access.  */
2337                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2338
2339                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2340                 if (!err)
2341                         break;
2342         } while (--retries);
2343
2344         err = tg3_phy_reset_chanpat(tp);
2345         if (err)
2346                 return err;
2347
2348         tg3_phydsp_write(tp, 0x8005, 0x0000);
2349
2350         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2351         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2352
2353         tg3_phy_toggle_auxctl_smdsp(tp, false);
2354
2355         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2356
2357         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2358                 reg32 &= ~0x3000;
2359                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2360         } else if (!err)
2361                 err = -EBUSY;
2362
2363         return err;
2364 }
2365
2366 /* This will unconditionally reset the tigon3 PHY and reapply the
2367  * chip- and PHY-specific workarounds that a reset wipes out.
2368  */
2369 static int tg3_phy_reset(struct tg3 *tp)
2370 {
2371         u32 val, cpmuctrl;
2372         int err;
2373
2374         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2375                 val = tr32(GRC_MISC_CFG);
2376                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2377                 udelay(40);
2378         }
2379         err  = tg3_readphy(tp, MII_BMSR, &val);
2380         err |= tg3_readphy(tp, MII_BMSR, &val);
2381         if (err != 0)
2382                 return -EBUSY;
2383
2384         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2385                 netif_carrier_off(tp->dev);
2386                 tg3_link_report(tp);
2387         }
2388
2389         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2390             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2391             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2392                 err = tg3_phy_reset_5703_4_5(tp);
2393                 if (err)
2394                         return err;
2395                 goto out;
2396         }
2397
2398         cpmuctrl = 0;
2399         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2400             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2401                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2402                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2403                         tw32(TG3_CPMU_CTRL,
2404                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2405         }
2406
2407         err = tg3_bmcr_reset(tp);
2408         if (err)
2409                 return err;
2410
2411         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2412                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2413                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2414
2415                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2416         }
2417
2418         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2419             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2420                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2421                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2422                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2423                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2424                         udelay(40);
2425                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2426                 }
2427         }
2428
2429         if (tg3_flag(tp, 5717_PLUS) &&
2430             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2431                 return 0;
2432
2433         tg3_phy_apply_otp(tp);
2434
2435         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2436                 tg3_phy_toggle_apd(tp, true);
2437         else
2438                 tg3_phy_toggle_apd(tp, false);
2439
2440 out:
2441         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2442             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2443                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2444                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2445                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2446         }
2447
2448         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2449                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2450                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2451         }
2452
2453         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2454                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2455                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2456                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2457                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2458                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2459                 }
2460         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2461                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2462                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2463                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2464                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2465                                 tg3_writephy(tp, MII_TG3_TEST1,
2466                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2467                         } else
2468                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2469
2470                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2471                 }
2472         }
2473
2474         /* Set the extended packet length bit (bit 14) on all chips
2475          * that support jumbo frames. */
2476         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2477                 /* Cannot do read-modify-write on 5401 */
2478                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2479         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2480                 /* Set bit 14 with read-modify-write to preserve other bits */
2481                 err = tg3_phy_auxctl_read(tp,
2482                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2483                 if (!err)
2484                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2485                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2486         }
2487
2488         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to
2489          * support jumbo frame transmission.
2490          */
2491         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2492                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2493                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2494                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2495         }
2496
2497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2498                 /* adjust output voltage */
2499                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2500         }
2501
2502         tg3_phy_toggle_automdix(tp, 1);
2503         tg3_phy_set_wirespeed(tp);
2504         return 0;
2505 }
2506
2507 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2508 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2509 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2510                                           TG3_GPIO_MSG_NEED_VAUX)
2511 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2512         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2513          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2514          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2515          (TG3_GPIO_MSG_DRVR_PRES << 12))
2516
2517 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2518         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2519          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2520          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2521          (TG3_GPIO_MSG_NEED_VAUX << 12))
2522
2523 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2524 {
2525         u32 status, shift;
2526
2527         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2528             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2529                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2530         else
2531                 status = tr32(TG3_CPMU_DRV_STATUS);
2532
2533         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2534         status &= ~(TG3_GPIO_MSG_MASK << shift);
2535         status |= (newstat << shift);
2536
2537         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2538             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2539                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2540         else
2541                 tw32(TG3_CPMU_DRV_STATUS, status);
2542
2543         return status >> TG3_APE_GPIO_MSG_SHIFT;
2544 }
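
/* Illustrative sketch (hypothetical helper, not used by the driver): each
 * PCI function owns a four-bit window in the shared status word, so one
 * function's state can be recovered with the same shift used above.
 */
static inline u32 tg3_example_function_status(u32 status, u32 pci_fn)
{
	u32 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn;

	return (status >> shift) & TG3_GPIO_MSG_MASK;
}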
2545
2546 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2547 {
2548         if (!tg3_flag(tp, IS_NIC))
2549                 return 0;
2550
2551         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2552             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2553             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2554                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2555                         return -EIO;
2556
2557                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2558
2559                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2560                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2561
2562                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2563         } else {
2564                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2565                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2566         }
2567
2568         return 0;
2569 }
2570
2571 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2572 {
2573         u32 grc_local_ctrl;
2574
2575         if (!tg3_flag(tp, IS_NIC) ||
2576             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2577             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2578                 return;
2579
2580         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2581
2582         tw32_wait_f(GRC_LOCAL_CTRL,
2583                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2584                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2585
2586         tw32_wait_f(GRC_LOCAL_CTRL,
2587                     grc_local_ctrl,
2588                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2589
2590         tw32_wait_f(GRC_LOCAL_CTRL,
2591                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2592                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2593 }
2594
2595 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2596 {
2597         if (!tg3_flag(tp, IS_NIC))
2598                 return;
2599
2600         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2601             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2602                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2603                             (GRC_LCLCTRL_GPIO_OE0 |
2604                              GRC_LCLCTRL_GPIO_OE1 |
2605                              GRC_LCLCTRL_GPIO_OE2 |
2606                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2607                              GRC_LCLCTRL_GPIO_OUTPUT1),
2608                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2609         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2610                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2611                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2612                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2613                                      GRC_LCLCTRL_GPIO_OE1 |
2614                                      GRC_LCLCTRL_GPIO_OE2 |
2615                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2616                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2617                                      tp->grc_local_ctrl;
2618                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2619                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2620
2621                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2622                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2623                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2624
2625                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2626                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2627                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2628         } else {
2629                 u32 no_gpio2;
2630                 u32 grc_local_ctrl = 0;
2631
2632                 /* Workaround to prevent drawing too much current. */
2633                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2634                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2635                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2636                                     grc_local_ctrl,
2637                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2638                 }
2639
2640                 /* On 5753 and variants, GPIO2 cannot be used. */
2641                 no_gpio2 = tp->nic_sram_data_cfg &
2642                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2643
2644                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2645                                   GRC_LCLCTRL_GPIO_OE1 |
2646                                   GRC_LCLCTRL_GPIO_OE2 |
2647                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2648                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2649                 if (no_gpio2) {
2650                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2651                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2652                 }
2653                 tw32_wait_f(GRC_LOCAL_CTRL,
2654                             tp->grc_local_ctrl | grc_local_ctrl,
2655                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2656
2657                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2658
2659                 tw32_wait_f(GRC_LOCAL_CTRL,
2660                             tp->grc_local_ctrl | grc_local_ctrl,
2661                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2662
2663                 if (!no_gpio2) {
2664                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2665                         tw32_wait_f(GRC_LOCAL_CTRL,
2666                                     tp->grc_local_ctrl | grc_local_ctrl,
2667                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2668                 }
2669         }
2670 }
2671
2672 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2673 {
2674         u32 msg = 0;
2675
2676         /* Serialize power state transitions */
2677         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2678                 return;
2679
2680         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2681                 msg = TG3_GPIO_MSG_NEED_VAUX;
2682
2683         msg = tg3_set_function_status(tp, msg);
2684
2685         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2686                 goto done;
2687
2688         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2689                 tg3_pwrsrc_switch_to_vaux(tp);
2690         else
2691                 tg3_pwrsrc_die_with_vmain(tp);
2692
2693 done:
2694         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2695 }
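
/* Power-source voting performed above: each PCI function publishes
 * DRVR_PRES plus, when ASF, APE, or WoL needs standby power, NEED_VAUX.
 * If any other function still has its driver present, the power source is
 * left for that driver to manage; otherwise the last function standing
 * switches to Vaux when anyone voted for it and stays on Vmain otherwise.
 */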
2696
2697 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2698 {
2699         bool need_vaux = false;
2700
2701         /* The GPIOs do something completely different on 57765. */
2702         if (!tg3_flag(tp, IS_NIC) ||
2703             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2704                 return;
2705
2706         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2707             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2708             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2709                 tg3_frob_aux_power_5717(tp, include_wol ?
2710                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2711                 return;
2712         }
2713
2714         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2715                 struct net_device *dev_peer;
2716
2717                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2718
2719                 /* remove_one() may have been run on the peer. */
2720                 if (dev_peer) {
2721                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2722
2723                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2724                                 return;
2725
2726                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2727                             tg3_flag(tp_peer, ENABLE_ASF))
2728                                 need_vaux = true;
2729                 }
2730         }
2731
2732         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2733             tg3_flag(tp, ENABLE_ASF))
2734                 need_vaux = true;
2735
2736         if (need_vaux)
2737                 tg3_pwrsrc_switch_to_vaux(tp);
2738         else
2739                 tg3_pwrsrc_die_with_vmain(tp);
2740 }
2741
2742 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2743 {
2744         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2745                 return 1;
2746         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2747                 if (speed != SPEED_10)
2748                         return 1;
2749         } else if (speed == SPEED_10)
2750                 return 1;
2751
2752         return 0;
2753 }
2754
2755 static int tg3_setup_phy(struct tg3 *, int);
2756 static int tg3_halt_cpu(struct tg3 *, u32);
2757
2758 static bool tg3_phy_power_bug(struct tg3 *tp)
2759 {
2760         switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2761         case ASIC_REV_5700:
2762         case ASIC_REV_5704:
2763                 return true;
2764         case ASIC_REV_5780:
2765                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2766                         return true;
2767                 return false;
2768         case ASIC_REV_5717:
2769                 if (!tp->pci_fn)
2770                         return true;
2771                 return false;
2772         case ASIC_REV_5719:
2773         case ASIC_REV_5720:
2774                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2775                     !tp->pci_fn)
2776                         return true;
2777                 return false;
2778         }
2779
2780         return false;
2781 }
2782
2783 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2784 {
2785         u32 val;
2786
2787         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2788                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2789                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2790                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2791
2792                         sg_dig_ctrl |=
2793                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2794                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2795                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2796                 }
2797                 return;
2798         }
2799
2800         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2801                 tg3_bmcr_reset(tp);
2802                 val = tr32(GRC_MISC_CFG);
2803                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2804                 udelay(40);
2805                 return;
2806         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2807                 u32 phytest;
2808                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2809                         u32 phy;
2810
2811                         tg3_writephy(tp, MII_ADVERTISE, 0);
2812                         tg3_writephy(tp, MII_BMCR,
2813                                      BMCR_ANENABLE | BMCR_ANRESTART);
2814
2815                         tg3_writephy(tp, MII_TG3_FET_TEST,
2816                                      phytest | MII_TG3_FET_SHADOW_EN);
2817                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2818                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2819                                 tg3_writephy(tp,
2820                                              MII_TG3_FET_SHDW_AUXMODE4,
2821                                              phy);
2822                         }
2823                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2824                 }
2825                 return;
2826         } else if (do_low_power) {
2827                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2828                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2829
2830                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2831                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2832                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2833                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2834         }
2835
2836         /* The PHY should not be powered down on some chips because
2837          * of bugs.
2838          */
2839         if (tg3_phy_power_bug(tp))
2840                 return;
2841
2842         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2843             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2844                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2845                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2846                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2847                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2848         }
2849
2850         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2851 }
2852
2853 /* tp->lock is held. */
2854 static int tg3_nvram_lock(struct tg3 *tp)
2855 {
2856         if (tg3_flag(tp, NVRAM)) {
2857                 int i;
2858
2859                 if (tp->nvram_lock_cnt == 0) {
2860                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2861                         for (i = 0; i < 8000; i++) {
2862                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2863                                         break;
2864                                 udelay(20);
2865                         }
2866                         if (i == 8000) {
2867                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2868                                 return -ENODEV;
2869                         }
2870                 }
2871                 tp->nvram_lock_cnt++;
2872         }
2873         return 0;
2874 }
2875
2876 /* tp->lock is held. */
2877 static void tg3_nvram_unlock(struct tg3 *tp)
2878 {
2879         if (tg3_flag(tp, NVRAM)) {
2880                 if (tp->nvram_lock_cnt > 0)
2881                         tp->nvram_lock_cnt--;
2882                 if (tp->nvram_lock_cnt == 0)
2883                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2884         }
2885 }
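
/* Usage sketch (hypothetical helper, not part of the driver): the software
 * arbitration semaphore is refcounted through tp->nvram_lock_cnt, so nested
 * callers must balance every tg3_nvram_lock() with a tg3_nvram_unlock();
 * only the outermost unlock releases the hardware semaphore.
 */
static inline int tg3_example_nvram_access(struct tg3 *tp)
{
	/* tp->lock must already be held, as for the functions above */
	int ret = tg3_nvram_lock(tp);

	if (ret)
		return ret;

	/* ... NVRAM register accesses go here ... */

	tg3_nvram_unlock(tp);
	return 0;
}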
2886
2887 /* tp->lock is held. */
2888 static void tg3_enable_nvram_access(struct tg3 *tp)
2889 {
2890         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2891                 u32 nvaccess = tr32(NVRAM_ACCESS);
2892
2893                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2894         }
2895 }
2896
2897 /* tp->lock is held. */
2898 static void tg3_disable_nvram_access(struct tg3 *tp)
2899 {
2900         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2901                 u32 nvaccess = tr32(NVRAM_ACCESS);
2902
2903                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2904         }
2905 }
2906
2907 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2908                                         u32 offset, u32 *val)
2909 {
2910         u32 tmp;
2911         int i;
2912
2913         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2914                 return -EINVAL;
2915
2916         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2917                                         EEPROM_ADDR_DEVID_MASK |
2918                                         EEPROM_ADDR_READ);
2919         tw32(GRC_EEPROM_ADDR,
2920              tmp |
2921              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2922              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2923               EEPROM_ADDR_ADDR_MASK) |
2924              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2925
2926         for (i = 0; i < 1000; i++) {
2927                 tmp = tr32(GRC_EEPROM_ADDR);
2928
2929                 if (tmp & EEPROM_ADDR_COMPLETE)
2930                         break;
2931                 msleep(1);
2932         }
2933         if (!(tmp & EEPROM_ADDR_COMPLETE))
2934                 return -EBUSY;
2935
2936         tmp = tr32(GRC_EEPROM_DATA);
2937
2938         /*
2939          * The data will always be opposite the native endian
2940          * format.  Perform a blind byteswap to compensate.
2941          */
2942         *val = swab32(tmp);
2943
2944         return 0;
2945 }
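
/* Concretely: whatever 32-bit value GRC_EEPROM_DATA returns, the
 * caller receives its byte-reversed form, e.g. a register value of
 * 0x78563412 (purely illustrative) becomes *val == 0x12345678.
 */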
2946
2947 #define NVRAM_CMD_TIMEOUT 10000
2948
2949 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2950 {
2951         int i;
2952
2953         tw32(NVRAM_CMD, nvram_cmd);
2954         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2955                 udelay(10);
2956                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2957                         udelay(10);
2958                         break;
2959                 }
2960         }
2961
2962         if (i == NVRAM_CMD_TIMEOUT)
2963                 return -EBUSY;
2964
2965         return 0;
2966 }
2967
2968 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2969 {
2970         if (tg3_flag(tp, NVRAM) &&
2971             tg3_flag(tp, NVRAM_BUFFERED) &&
2972             tg3_flag(tp, FLASH) &&
2973             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2974             (tp->nvram_jedecnum == JEDEC_ATMEL))
2975
2976                 addr = ((addr / tp->nvram_pagesize) <<
2977                         ATMEL_AT45DB0X1B_PAGE_POS) +
2978                        (addr % tp->nvram_pagesize);
2979
2980         return addr;
2981 }
2982
2983 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2984 {
2985         if (tg3_flag(tp, NVRAM) &&
2986             tg3_flag(tp, NVRAM_BUFFERED) &&
2987             tg3_flag(tp, FLASH) &&
2988             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2989             (tp->nvram_jedecnum == JEDEC_ATMEL))
2990
2991                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2992                         tp->nvram_pagesize) +
2993                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2994
2995         return addr;
2996 }
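
/* Worked example of the translation above, assuming the usual
 * 264-byte AT45DB0x1B page and ATMEL_AT45DB0X1B_PAGE_POS == 9:
 * linear address 1000 is page 1000 / 264 = 3, offset 1000 % 264 = 208,
 * so tg3_nvram_phys_addr() yields (3 << 9) + 208 = 1744, and
 * tg3_nvram_logical_addr() inverts it: (1744 >> 9) * 264 +
 * (1744 & 511) = 1000.
 */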
2997
2998 /* NOTE: Data read in from NVRAM is byteswapped according to
2999  * the byteswapping settings for all other register accesses.
3000  * tg3 devices are BE devices, so on a BE machine, the data
3001  * returned will be exactly as it is seen in NVRAM.  On a LE
3002  * machine, the 32-bit value will be byteswapped.
3003  */
3004 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3005 {
3006         int ret;
3007
3008         if (!tg3_flag(tp, NVRAM))
3009                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3010
3011         offset = tg3_nvram_phys_addr(tp, offset);
3012
3013         if (offset > NVRAM_ADDR_MSK)
3014                 return -EINVAL;
3015
3016         ret = tg3_nvram_lock(tp);
3017         if (ret)
3018                 return ret;
3019
3020         tg3_enable_nvram_access(tp);
3021
3022         tw32(NVRAM_ADDR, offset);
3023         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3024                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3025
3026         if (ret == 0)
3027                 *val = tr32(NVRAM_RDDATA);
3028
3029         tg3_disable_nvram_access(tp);
3030
3031         tg3_nvram_unlock(tp);
3032
3033         return ret;
3034 }
3035
3036 /* Ensures NVRAM data is in bytestream format. */
3037 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3038 {
3039         u32 v;
3040         int res = tg3_nvram_read(tp, offset, &v);
3041         if (!res)
3042                 *val = cpu_to_be32(v);
3043         return res;
3044 }
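
/* Illustrative caller ("start" is a hypothetical offset): reading a
 * 16-byte region that must stay in bytestream order on any host:
 *
 *	__be32 buf[4];
 *	int i, err = 0;
 *
 *	for (i = 0; i < 4 && !err; i++)
 *		err = tg3_nvram_read_be32(tp, start + i * 4, &buf[i]);
 */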
3045
3046 #define RX_CPU_SCRATCH_BASE     0x30000
3047 #define RX_CPU_SCRATCH_SIZE     0x04000
3048 #define TX_CPU_SCRATCH_BASE     0x34000
3049 #define TX_CPU_SCRATCH_SIZE     0x04000
3050
3051 /* tp->lock is held. */
3052 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3053 {
3054         int i;
3055
3056         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3057
3058         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3059                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3060
3061                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3062                 return 0;
3063         }
3064         if (offset == RX_CPU_BASE) {
3065                 for (i = 0; i < 10000; i++) {
3066                         tw32(offset + CPU_STATE, 0xffffffff);
3067                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3068                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3069                                 break;
3070                 }
3071
3072                 tw32(offset + CPU_STATE, 0xffffffff);
3073                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3074                 udelay(10);
3075         } else {
3076                 for (i = 0; i < 10000; i++) {
3077                         tw32(offset + CPU_STATE, 0xffffffff);
3078                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3079                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3080                                 break;
3081                 }
3082         }
3083
3084         if (i >= 10000) {
3085                 netdev_err(tp->dev, "%s: %s CPU halt timed out\n",
3086                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3087                 return -ENODEV;
3088         }
3089
3090         /* Clear firmware's nvram arbitration. */
3091         if (tg3_flag(tp, NVRAM))
3092                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3093         return 0;
3094 }
3095
3096 struct fw_info {
3097         unsigned int fw_base;
3098         unsigned int fw_len;
3099         const __be32 *fw_data;
3100 };
3101
3102 /* tp->lock is held. */
3103 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3104                                  u32 cpu_scratch_base, int cpu_scratch_size,
3105                                  struct fw_info *info)
3106 {
3107         int err, lock_err, i;
3108         void (*write_op)(struct tg3 *, u32, u32);
3109
3110         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3111                 netdev_err(tp->dev,
3112                            "%s: Trying to load TX cpu firmware on a 5705 or later chip\n",
3113                            __func__);
3114                 return -EINVAL;
3115         }
3116
3117         if (tg3_flag(tp, 5705_PLUS))
3118                 write_op = tg3_write_mem;
3119         else
3120                 write_op = tg3_write_indirect_reg32;
3121
3122         /* It is possible that bootcode is still loading at this point.
3123          * Acquire the nvram lock before halting the cpu.
3124          */
3125         lock_err = tg3_nvram_lock(tp);
3126         err = tg3_halt_cpu(tp, cpu_base);
3127         if (!lock_err)
3128                 tg3_nvram_unlock(tp);
3129         if (err)
3130                 goto out;
3131
3132         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3133                 write_op(tp, cpu_scratch_base + i, 0);
3134         tw32(cpu_base + CPU_STATE, 0xffffffff);
3135         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3136         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3137                 write_op(tp, (cpu_scratch_base +
3138                               (info->fw_base & 0xffff) +
3139                               (i * sizeof(u32))),
3140                               be32_to_cpu(info->fw_data[i]));
3141
3142         err = 0;
3143
3144 out:
3145         return err;
3146 }
3147
3148 /* tp->lock is held. */
3149 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3150 {
3151         struct fw_info info;
3152         const __be32 *fw_data;
3153         int err, i;
3154
3155         fw_data = (void *)tp->fw->data;
3156
3157         /* The firmware blob starts with version numbers, followed by
3158          * the start address and length.  We use the complete length:
3159          * length = end_address_of_bss - start_address_of_text.  The
3160          * remainder is the blob, loaded contiguously from the start
3161          * address. */
3162
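        /* Per the assignments below, the 12-byte header is three
         * 32-bit words: word 0 carries the version, word 1 the load
         * (start) address and word 2 the stated length; the image
         * proper begins at word 3, hence fw_len = tp->fw->size - 12.
         */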
3163         info.fw_base = be32_to_cpu(fw_data[1]);
3164         info.fw_len = tp->fw->size - 12;
3165         info.fw_data = &fw_data[3];
3166
3167         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3168                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3169                                     &info);
3170         if (err)
3171                 return err;
3172
3173         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3174                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3175                                     &info);
3176         if (err)
3177                 return err;
3178
3179         /* Now start up only the RX cpu. */
3180         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3181         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3182
3183         for (i = 0; i < 5; i++) {
3184                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3185                         break;
3186                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3187                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3188                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3189                 udelay(1000);
3190         }
3191         if (i >= 5) {
3192                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3193                            "should be %08x\n", __func__,
3194                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3195                 return -ENODEV;
3196         }
3197         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3198         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3199
3200         return 0;
3201 }
3202
3203 /* tp->lock is held. */
3204 static int tg3_load_tso_firmware(struct tg3 *tp)
3205 {
3206         struct fw_info info;
3207         const __be32 *fw_data;
3208         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3209         int err, i;
3210
3211         if (tg3_flag(tp, HW_TSO_1) ||
3212             tg3_flag(tp, HW_TSO_2) ||
3213             tg3_flag(tp, HW_TSO_3))
3214                 return 0;
3215
3216         fw_data = (void *)tp->fw->data;
3217
3218         /* The firmware blob starts with version numbers, followed by
3219          * the start address and length.  We use the complete length:
3220          * length = end_address_of_bss - start_address_of_text.  The
3221          * remainder is the blob, loaded contiguously from the start
3222          * address. */
3223
3224         info.fw_base = be32_to_cpu(fw_data[1]);
3225         cpu_scratch_size = tp->fw_len;
3226         info.fw_len = tp->fw->size - 12;
3227         info.fw_data = &fw_data[3];
3228
3229         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3230                 cpu_base = RX_CPU_BASE;
3231                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3232         } else {
3233                 cpu_base = TX_CPU_BASE;
3234                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3235                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3236         }
3237
3238         err = tg3_load_firmware_cpu(tp, cpu_base,
3239                                     cpu_scratch_base, cpu_scratch_size,
3240                                     &info);
3241         if (err)
3242                 return err;
3243
3244         /* Now start up the cpu. */
3245         tw32(cpu_base + CPU_STATE, 0xffffffff);
3246         tw32_f(cpu_base + CPU_PC, info.fw_base);
3247
3248         for (i = 0; i < 5; i++) {
3249                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3250                         break;
3251                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3252                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3253                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3254                 udelay(1000);
3255         }
3256         if (i >= 5) {
3257                 netdev_err(tp->dev,
3258                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3259                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3260                 return -ENODEV;
3261         }
3262         tw32(cpu_base + CPU_STATE, 0xffffffff);
3263         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3264         return 0;
3265 }
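
/* Both firmware loaders above use the same start handshake: leave the
 * CPU halted, write the entry point to CPU_PC, re-read it up to five
 * times (re-halting and re-writing with a 1 ms settle on each miss),
 * and clear CPU_MODE to release the CPU only once the PC has
 * verifiably stuck.
 */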
3266
3267
3268 /* tp->lock is held. */
3269 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3270 {
3271         u32 addr_high, addr_low;
3272         int i;
3273
3274         addr_high = ((tp->dev->dev_addr[0] << 8) |
3275                      tp->dev->dev_addr[1]);
3276         addr_low = ((tp->dev->dev_addr[2] << 24) |
3277                     (tp->dev->dev_addr[3] << 16) |
3278                     (tp->dev->dev_addr[4] <<  8) |
3279                     (tp->dev->dev_addr[5] <<  0));
3280         for (i = 0; i < 4; i++) {
3281                 if (i == 1 && skip_mac_1)
3282                         continue;
3283                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3284                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3285         }
3286
3287         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3288             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3289                 for (i = 0; i < 12; i++) {
3290                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3291                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3292                 }
3293         }
3294
3295         addr_high = (tp->dev->dev_addr[0] +
3296                      tp->dev->dev_addr[1] +
3297                      tp->dev->dev_addr[2] +
3298                      tp->dev->dev_addr[3] +
3299                      tp->dev->dev_addr[4] +
3300                      tp->dev->dev_addr[5]) &
3301                 TX_BACKOFF_SEED_MASK;
3302         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3303 }
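
/* Illustrative packing: for a made-up station address
 * 00:10:18:aa:bb:cc, the loop above writes addr_high = 0x0010
 * (bytes 0-1) and addr_low = 0x18aabbcc (bytes 2-5) into each of the
 * four MAC_ADDR_*_HIGH/LOW register pairs.
 */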
3304
3305 static void tg3_enable_register_access(struct tg3 *tp)
3306 {
3307         /*
3308          * Make sure register accesses (indirect or otherwise) will function
3309          * correctly.
3310          */
3311         pci_write_config_dword(tp->pdev,
3312                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3313 }
3314
3315 static int tg3_power_up(struct tg3 *tp)
3316 {
3317         int err;
3318
3319         tg3_enable_register_access(tp);
3320
3321         err = pci_set_power_state(tp->pdev, PCI_D0);
3322         if (!err) {
3323                 /* Switch out of Vaux if it is a NIC */
3324                 tg3_pwrsrc_switch_to_vmain(tp);
3325         } else {
3326                 netdev_err(tp->dev, "Transition to D0 failed\n");
3327         }
3328
3329         return err;
3330 }
3331
3332 static int tg3_power_down_prepare(struct tg3 *tp)
3333 {
3334         u32 misc_host_ctrl;
3335         bool device_should_wake, do_low_power;
3336
3337         tg3_enable_register_access(tp);
3338
3339         /* Restore the CLKREQ setting. */
3340         if (tg3_flag(tp, CLKREQ_BUG)) {
3341                 u16 lnkctl;
3342
3343                 pci_read_config_word(tp->pdev,
3344                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3345                                      &lnkctl);
3346                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3347                 pci_write_config_word(tp->pdev,
3348                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3349                                       lnkctl);
3350         }
3351
3352         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3353         tw32(TG3PCI_MISC_HOST_CTRL,
3354              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3355
3356         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3357                              tg3_flag(tp, WOL_ENABLE);
3358
3359         if (tg3_flag(tp, USE_PHYLIB)) {
3360                 do_low_power = false;
3361                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3362                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3363                         struct phy_device *phydev;
3364                         u32 phyid, advertising;
3365
3366                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3367
3368                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3369
3370                         tp->link_config.orig_speed = phydev->speed;
3371                         tp->link_config.orig_duplex = phydev->duplex;
3372                         tp->link_config.orig_autoneg = phydev->autoneg;
3373                         tp->link_config.orig_advertising = phydev->advertising;
3374
3375                         advertising = ADVERTISED_TP |
3376                                       ADVERTISED_Pause |
3377                                       ADVERTISED_Autoneg |
3378                                       ADVERTISED_10baseT_Half;
3379
3380                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3381                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3382                                         advertising |=
3383                                                 ADVERTISED_100baseT_Half |
3384                                                 ADVERTISED_100baseT_Full |
3385                                                 ADVERTISED_10baseT_Full;
3386                                 else
3387                                         advertising |= ADVERTISED_10baseT_Full;
3388                         }
3389
3390                         phydev->advertising = advertising;
3391
3392                         phy_start_aneg(phydev);
3393
3394                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3395                         if (phyid != PHY_ID_BCMAC131) {
3396                                 phyid &= PHY_BCM_OUI_MASK;
3397                                 if (phyid == PHY_BCM_OUI_1 ||
3398                                     phyid == PHY_BCM_OUI_2 ||
3399                                     phyid == PHY_BCM_OUI_3)
3400                                         do_low_power = true;
3401                         }
3402                 }
3403         } else {
3404                 do_low_power = true;
3405
3406                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3407                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3408                         tp->link_config.orig_speed = tp->link_config.speed;
3409                         tp->link_config.orig_duplex = tp->link_config.duplex;
3410                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
3411                 }
3412
3413                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3414                         tp->link_config.speed = SPEED_10;
3415                         tp->link_config.duplex = DUPLEX_HALF;
3416                         tp->link_config.autoneg = AUTONEG_ENABLE;
3417                         tg3_setup_phy(tp, 0);
3418                 }
3419         }
3420
3421         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3422                 u32 val;
3423
3424                 val = tr32(GRC_VCPU_EXT_CTRL);
3425                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3426         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3427                 int i;
3428                 u32 val;
3429
3430                 for (i = 0; i < 200; i++) {
3431                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3432                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3433                                 break;
3434                         msleep(1);
3435                 }
3436         }
3437         if (tg3_flag(tp, WOL_CAP))
3438                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3439                                                      WOL_DRV_STATE_SHUTDOWN |
3440                                                      WOL_DRV_WOL |
3441                                                      WOL_SET_MAGIC_PKT);
3442
3443         if (device_should_wake) {
3444                 u32 mac_mode;
3445
3446                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3447                         if (do_low_power &&
3448                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3449                                 tg3_phy_auxctl_write(tp,
3450                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3451                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3452                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3453                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3454                                 udelay(40);
3455                         }
3456
3457                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3458                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3459                         else
3460                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3461
3462                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3463                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3464                             ASIC_REV_5700) {
3465                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3466                                              SPEED_100 : SPEED_10;
3467                                 if (tg3_5700_link_polarity(tp, speed))
3468                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3469                                 else
3470                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3471                         }
3472                 } else {
3473                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3474                 }
3475
3476                 if (!tg3_flag(tp, 5750_PLUS))
3477                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3478
3479                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3480                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3481                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3482                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3483
3484                 if (tg3_flag(tp, ENABLE_APE))
3485                         mac_mode |= MAC_MODE_APE_TX_EN |
3486                                     MAC_MODE_APE_RX_EN |
3487                                     MAC_MODE_TDE_ENABLE;
3488
3489                 tw32_f(MAC_MODE, mac_mode);
3490                 udelay(100);
3491
3492                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3493                 udelay(10);
3494         }
3495
3496         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3497             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3498              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3499                 u32 base_val;
3500
3501                 base_val = tp->pci_clock_ctrl;
3502                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3503                              CLOCK_CTRL_TXCLK_DISABLE);
3504
3505                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3506                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3507         } else if (tg3_flag(tp, 5780_CLASS) ||
3508                    tg3_flag(tp, CPMU_PRESENT) ||
3509                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3510                 /* do nothing */
3511         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3512                 u32 newbits1, newbits2;
3513
3514                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3515                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3516                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3517                                     CLOCK_CTRL_TXCLK_DISABLE |
3518                                     CLOCK_CTRL_ALTCLK);
3519                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3520                 } else if (tg3_flag(tp, 5705_PLUS)) {
3521                         newbits1 = CLOCK_CTRL_625_CORE;
3522                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3523                 } else {
3524                         newbits1 = CLOCK_CTRL_ALTCLK;
3525                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3526                 }
3527
3528                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3529                             40);
3530
3531                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3532                             40);
3533
3534                 if (!tg3_flag(tp, 5705_PLUS)) {
3535                         u32 newbits3;
3536
3537                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3538                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3539                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3540                                             CLOCK_CTRL_TXCLK_DISABLE |
3541                                             CLOCK_CTRL_44MHZ_CORE);
3542                         } else {
3543                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3544                         }
3545
3546                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3547                                     tp->pci_clock_ctrl | newbits3, 40);
3548                 }
3549         }
3550
3551         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3552                 tg3_power_down_phy(tp, do_low_power);
3553
3554         tg3_frob_aux_power(tp, true);
3555
3556         /* Workaround for unstable PLL clock */
3557         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3558             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3559                 u32 val = tr32(0x7d00);
3560
3561                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3562                 tw32(0x7d00, val);
3563                 if (!tg3_flag(tp, ENABLE_ASF)) {
3564                         int err;
3565
3566                         err = tg3_nvram_lock(tp);
3567                         tg3_halt_cpu(tp, RX_CPU_BASE);
3568                         if (!err)
3569                                 tg3_nvram_unlock(tp);
3570                 }
3571         }
3572
3573         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3574
3575         return 0;
3576 }
3577
3578 static void tg3_power_down(struct tg3 *tp)
3579 {
3580         tg3_power_down_prepare(tp);
3581
3582         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3583         pci_set_power_state(tp->pdev, PCI_D3hot);
3584 }
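
/* Note the ordering above: WOL is armed via pci_wake_from_d3() while
 * the device is still in D0, and only then does it enter D3hot.
 */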
3585
3586 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3587 {
3588         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3589         case MII_TG3_AUX_STAT_10HALF:
3590                 *speed = SPEED_10;
3591                 *duplex = DUPLEX_HALF;
3592                 break;
3593
3594         case MII_TG3_AUX_STAT_10FULL:
3595                 *speed = SPEED_10;
3596                 *duplex = DUPLEX_FULL;
3597                 break;
3598
3599         case MII_TG3_AUX_STAT_100HALF:
3600                 *speed = SPEED_100;
3601                 *duplex = DUPLEX_HALF;
3602                 break;
3603
3604         case MII_TG3_AUX_STAT_100FULL:
3605                 *speed = SPEED_100;
3606                 *duplex = DUPLEX_FULL;
3607                 break;
3608
3609         case MII_TG3_AUX_STAT_1000HALF:
3610                 *speed = SPEED_1000;
3611                 *duplex = DUPLEX_HALF;
3612                 break;
3613
3614         case MII_TG3_AUX_STAT_1000FULL:
3615                 *speed = SPEED_1000;
3616                 *duplex = DUPLEX_FULL;
3617                 break;
3618
3619         default:
3620                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3621                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3622                                  SPEED_10;
3623                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3624                                   DUPLEX_HALF;
3625                         break;
3626                 }
3627                 *speed = SPEED_INVALID;
3628                 *duplex = DUPLEX_INVALID;
3629                 break;
3630         }
3631 }
3632
3633 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3634 {
3635         int err = 0;
3636         u32 val, new_adv;
3637
3638         new_adv = ADVERTISE_CSMA;
3639         if (advertise & ADVERTISED_10baseT_Half)
3640                 new_adv |= ADVERTISE_10HALF;
3641         if (advertise & ADVERTISED_10baseT_Full)
3642                 new_adv |= ADVERTISE_10FULL;
3643         if (advertise & ADVERTISED_100baseT_Half)
3644                 new_adv |= ADVERTISE_100HALF;
3645         if (advertise & ADVERTISED_100baseT_Full)
3646                 new_adv |= ADVERTISE_100FULL;
3647
3648         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3649
3650         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3651         if (err)
3652                 goto done;
3653
3654         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3655                 goto done;
3656
3657         new_adv = 0;
3658         if (advertise & ADVERTISED_1000baseT_Half)
3659                 new_adv |= ADVERTISE_1000HALF;
3660         if (advertise & ADVERTISED_1000baseT_Full)
3661                 new_adv |= ADVERTISE_1000FULL;
3662
3663         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3664             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3665                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3666
3667         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3668         if (err)
3669                 goto done;
3670
3671         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3672                 goto done;
3673
3674         tw32(TG3_CPMU_EEE_MODE,
3675              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3676
3677         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
3678         if (!err) {
3679                 u32 err2;
3680
3681                 val = 0;
3682                 /* Advertise 100-BaseTX EEE ability */
3683                 if (advertise & ADVERTISED_100baseT_Full)
3684                         val |= MDIO_AN_EEE_ADV_100TX;
3685                 /* Advertise 1000-BaseT EEE ability */
3686                 if (advertise & ADVERTISED_1000baseT_Full)
3687                         val |= MDIO_AN_EEE_ADV_1000T;
3688                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3689                 if (err)
3690                         val = 0;
3691
3692                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3693                 case ASIC_REV_5717:
3694                 case ASIC_REV_57765:
3695                 case ASIC_REV_5719:
3696                         /* If any EEE modes were advertised above... */
3697                         if (val)
3698                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3699                                       MII_TG3_DSP_TAP26_RMRXSTO |
3700                                       MII_TG3_DSP_TAP26_OPCSINPT;
3701                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3702                         /* Fall through */
3703                 case ASIC_REV_5720:
3704                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3705                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3706                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3707                 }
3708
3709                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
3710                 if (!err)
3711                         err = err2;
3712         }
3713
3714 done:
3715         return err;
3716 }
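
/* Illustrative use: advertising only 100baseT full duplex with
 * symmetric pause would look like
 *
 *	tg3_phy_autoneg_cfg(tp, ADVERTISED_100baseT_Full,
 *			    FLOW_CTRL_TX | FLOW_CTRL_RX);
 *
 * followed by a BMCR_ANENABLE | BMCR_ANRESTART write, as in
 * tg3_phy_copper_begin() below.
 */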
3717
3718 static void tg3_phy_copper_begin(struct tg3 *tp)
3719 {
3720         u32 new_adv;
3721         int i;
3722
3723         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3724                 new_adv = ADVERTISED_10baseT_Half |
3725                           ADVERTISED_10baseT_Full;
3726                 if (tg3_flag(tp, WOL_SPEED_100MB))
3727                         new_adv |= ADVERTISED_100baseT_Half |
3728                                    ADVERTISED_100baseT_Full;
3729
3730                 tg3_phy_autoneg_cfg(tp, new_adv,
3731                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3732         } else if (tp->link_config.speed == SPEED_INVALID) {
3733                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3734                         tp->link_config.advertising &=
3735                                 ~(ADVERTISED_1000baseT_Half |
3736                                   ADVERTISED_1000baseT_Full);
3737
3738                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3739                                     tp->link_config.flowctrl);
3740         } else {
3741                 /* Asking for a specific link mode. */
3742                 if (tp->link_config.speed == SPEED_1000) {
3743                         if (tp->link_config.duplex == DUPLEX_FULL)
3744                                 new_adv = ADVERTISED_1000baseT_Full;
3745                         else
3746                                 new_adv = ADVERTISED_1000baseT_Half;
3747                 } else if (tp->link_config.speed == SPEED_100) {
3748                         if (tp->link_config.duplex == DUPLEX_FULL)
3749                                 new_adv = ADVERTISED_100baseT_Full;
3750                         else
3751                                 new_adv = ADVERTISED_100baseT_Half;
3752                 } else {
3753                         if (tp->link_config.duplex == DUPLEX_FULL)
3754                                 new_adv = ADVERTISED_10baseT_Full;
3755                         else
3756                                 new_adv = ADVERTISED_10baseT_Half;
3757                 }
3758
3759                 tg3_phy_autoneg_cfg(tp, new_adv,
3760                                     tp->link_config.flowctrl);
3761         }
3762
3763         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3764             tp->link_config.speed != SPEED_INVALID) {
3765                 u32 bmcr, orig_bmcr;
3766
3767                 tp->link_config.active_speed = tp->link_config.speed;
3768                 tp->link_config.active_duplex = tp->link_config.duplex;
3769
3770                 bmcr = 0;
3771                 switch (tp->link_config.speed) {
3772                 default:
3773                 case SPEED_10:
3774                         break;
3775
3776                 case SPEED_100:
3777                         bmcr |= BMCR_SPEED100;
3778                         break;
3779
3780                 case SPEED_1000:
3781                         bmcr |= BMCR_SPEED1000;
3782                         break;
3783                 }
3784
3785                 if (tp->link_config.duplex == DUPLEX_FULL)
3786                         bmcr |= BMCR_FULLDPLX;
3787
3788                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3789                     (bmcr != orig_bmcr)) {
3790                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3791                         for (i = 0; i < 1500; i++) {
3792                                 u32 tmp;
3793
3794                                 udelay(10);
3795                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3796                                     tg3_readphy(tp, MII_BMSR, &tmp))
3797                                         continue;
3798                                 if (!(tmp & BMSR_LSTATUS)) {
3799                                         udelay(40);
3800                                         break;
3801                                 }
3802                         }
3803                         tg3_writephy(tp, MII_BMCR, bmcr);
3804                         udelay(40);
3805                 }
3806         } else {
3807                 tg3_writephy(tp, MII_BMCR,
3808                              BMCR_ANENABLE | BMCR_ANRESTART);
3809         }
3810 }
3811
3812 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3813 {
3814         int err;
3815
3816         /* Turn off tap power management and set the
3817          * extended packet length bit. */
3818         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3819
3820         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3821         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3822         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3823         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3824         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3825
3826         udelay(40);
3827
3828         return err;
3829 }
3830
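/* Returns 1 if the advertisement register contents exactly match the
 * modes in @mask (10/100 always checked, 1000 as well unless the PHY
 * is 10/100-only), and 0 on any mismatch or register read failure.
 */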
3831 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3832 {
3833         u32 adv_reg, all_mask = 0;
3834
3835         if (mask & ADVERTISED_10baseT_Half)
3836                 all_mask |= ADVERTISE_10HALF;
3837         if (mask & ADVERTISED_10baseT_Full)
3838                 all_mask |= ADVERTISE_10FULL;
3839         if (mask & ADVERTISED_100baseT_Half)
3840                 all_mask |= ADVERTISE_100HALF;
3841         if (mask & ADVERTISED_100baseT_Full)
3842                 all_mask |= ADVERTISE_100FULL;
3843
3844         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3845                 return 0;
3846
3847         if ((adv_reg & ADVERTISE_ALL) != all_mask)
3848                 return 0;
3849
3850         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3851                 u32 tg3_ctrl;
3852
3853                 all_mask = 0;
3854                 if (mask & ADVERTISED_1000baseT_Half)
3855                         all_mask |= ADVERTISE_1000HALF;
3856                 if (mask & ADVERTISED_1000baseT_Full)
3857                         all_mask |= ADVERTISE_1000FULL;
3858
3859                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3860                         return 0;
3861
3862                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3863                 if (tg3_ctrl != all_mask)
3864                         return 0;
3865         }
3866
3867         return 1;
3868 }
3869
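/* Checks that the advertised pause bits match what the configured flow
 * control requires; on non-full-duplex links it also rewrites a stale
 * advertisement so a future renegotiation starts out correct.  Returns
 * 0 only when a full-duplex link advertises the wrong pause bits.
 */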
3870 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3871 {
3872         u32 curadv, reqadv;
3873
3874         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3875                 return 1;
3876
3877         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3878         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3879
3880         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3881                 if (curadv != reqadv)
3882                         return 0;
3883
3884                 if (tg3_flag(tp, PAUSE_AUTONEG))
3885                         tg3_readphy(tp, MII_LPA, rmtadv);
3886         } else {
3887                 /* Reprogram the advertisement register, even if it
3888                  * does not affect the current link.  If the link
3889                  * gets renegotiated in the future, we can save an
3890                  * additional renegotiation cycle by advertising
3891                  * it correctly in the first place.
3892                  */
3893                 if (curadv != reqadv) {
3894                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3895                                      ADVERTISE_PAUSE_ASYM);
3896                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3897                 }
3898         }
3899
3900         return 1;
3901 }
3902
3903 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3904 {
3905         int current_link_up;
3906         u32 bmsr, val;
3907         u32 lcl_adv, rmt_adv;
3908         u16 current_speed;
3909         u8 current_duplex;
3910         int i, err;
3911
3912         tw32(MAC_EVENT, 0);
3913
3914         tw32_f(MAC_STATUS,
3915              (MAC_STATUS_SYNC_CHANGED |
3916               MAC_STATUS_CFG_CHANGED |
3917               MAC_STATUS_MI_COMPLETION |
3918               MAC_STATUS_LNKSTATE_CHANGED));
3919         udelay(40);
3920
3921         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3922                 tw32_f(MAC_MI_MODE,
3923                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3924                 udelay(80);
3925         }
3926
3927         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3928
3929         /* Some third-party PHYs need to be reset on link going
3930          * down.
3931          */
3932         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3933              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3934              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3935             netif_carrier_ok(tp->dev)) {
3936                 tg3_readphy(tp, MII_BMSR, &bmsr);
3937                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3938                     !(bmsr & BMSR_LSTATUS))
3939                         force_reset = 1;
3940         }
3941         if (force_reset)
3942                 tg3_phy_reset(tp);
3943
3944         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3945                 tg3_readphy(tp, MII_BMSR, &bmsr);
3946                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3947                     !tg3_flag(tp, INIT_COMPLETE))
3948                         bmsr = 0;
3949
3950                 if (!(bmsr & BMSR_LSTATUS)) {
3951                         err = tg3_init_5401phy_dsp(tp);
3952                         if (err)
3953                                 return err;
3954
3955                         tg3_readphy(tp, MII_BMSR, &bmsr);
3956                         for (i = 0; i < 1000; i++) {
3957                                 udelay(10);
3958                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3959                                     (bmsr & BMSR_LSTATUS)) {
3960                                         udelay(40);
3961                                         break;
3962                                 }
3963                         }
3964
3965                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3966                             TG3_PHY_REV_BCM5401_B0 &&
3967                             !(bmsr & BMSR_LSTATUS) &&
3968                             tp->link_config.active_speed == SPEED_1000) {
3969                                 err = tg3_phy_reset(tp);
3970                                 if (!err)
3971                                         err = tg3_init_5401phy_dsp(tp);
3972                                 if (err)
3973                                         return err;
3974                         }
3975                 }
3976         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3977                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3978                 /* 5701 {A0,B0} CRC bug workaround */
3979                 tg3_writephy(tp, 0x15, 0x0a75);
3980                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3981                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3982                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3983         }
3984
3985         /* Clear pending interrupts... */
3986         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3987         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3988
3989         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3990                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3991         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3992                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3993
3994         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3995             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3996                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3997                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3998                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3999                 else
4000                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4001         }
4002
4003         current_link_up = 0;
4004         current_speed = SPEED_INVALID;
4005         current_duplex = DUPLEX_INVALID;
4006
4007         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4008                 err = tg3_phy_auxctl_read(tp,
4009                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4010                                           &val);
4011                 if (!err && !(val & (1 << 10))) {
4012                         tg3_phy_auxctl_write(tp,
4013                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4014                                              val | (1 << 10));
4015                         goto relink;
4016                 }
4017         }
4018
4019         bmsr = 0;
4020         for (i = 0; i < 100; i++) {
4021                 tg3_readphy(tp, MII_BMSR, &bmsr);
4022                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4023                     (bmsr & BMSR_LSTATUS))
4024                         break;
4025                 udelay(40);
4026         }
4027
4028         if (bmsr & BMSR_LSTATUS) {
4029                 u32 aux_stat, bmcr;
4030
4031                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4032                 for (i = 0; i < 2000; i++) {
4033                         udelay(10);
4034                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4035                             aux_stat)
4036                                 break;
4037                 }
4038
4039                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4040                                              &current_speed,
4041                                              &current_duplex);
4042
4043                 bmcr = 0;
4044                 for (i = 0; i < 200; i++) {
4045                         tg3_readphy(tp, MII_BMCR, &bmcr);
4046                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4047                                 continue;
4048                         if (bmcr && bmcr != 0x7fff)
4049                                 break;
4050                         udelay(10);
4051                 }
4052
4053                 lcl_adv = 0;
4054                 rmt_adv = 0;
4055
4056                 tp->link_config.active_speed = current_speed;
4057                 tp->link_config.active_duplex = current_duplex;
4058
4059                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4060                         if ((bmcr & BMCR_ANENABLE) &&
4061                             tg3_copper_is_advertising_all(tp,
4062                                                 tp->link_config.advertising)) {
4063                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4064                                                                   &rmt_adv))
4065                                         current_link_up = 1;
4066                         }
4067                 } else {
4068                         if (!(bmcr & BMCR_ANENABLE) &&
4069                             tp->link_config.speed == current_speed &&
4070                             tp->link_config.duplex == current_duplex &&
4071                             tp->link_config.flowctrl ==
4072                             tp->link_config.active_flowctrl) {
4073                                 current_link_up = 1;
4074                         }
4075                 }
4076
4077                 if (current_link_up == 1 &&
4078                     tp->link_config.active_duplex == DUPLEX_FULL)
4079                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4080         }
4081
4082 relink:
4083         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4084                 tg3_phy_copper_begin(tp);
4085
4086                 tg3_readphy(tp, MII_BMSR, &bmsr);
4087                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4088                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4089                         current_link_up = 1;
4090         }
4091
4092         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4093         if (current_link_up == 1) {
4094                 if (tp->link_config.active_speed == SPEED_100 ||
4095                     tp->link_config.active_speed == SPEED_10)
4096                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4097                 else
4098                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4099         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4100                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4101         else
4102                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4103
4104         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4105         if (tp->link_config.active_duplex == DUPLEX_HALF)
4106                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4107
4108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4109                 if (current_link_up == 1 &&
4110                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4111                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4112                 else
4113                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4114         }
4115
4116         /* Without this setting the Netgear GA302T PHY does not
4117          * send or receive packets; the reason was never pinned down.
4118          */
4119         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4120             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4121                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4122                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4123                 udelay(80);
4124         }
4125
4126         tw32_f(MAC_MODE, tp->mac_mode);
4127         udelay(40);
4128
4129         tg3_phy_eee_adjust(tp, current_link_up);
4130
4131         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4132                 /* Polled via timer. */
4133                 tw32_f(MAC_EVENT, 0);
4134         } else {
4135                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4136         }
4137         udelay(40);
4138
4139         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4140             current_link_up == 1 &&
4141             tp->link_config.active_speed == SPEED_1000 &&
4142             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4143                 udelay(120);
4144                 tw32_f(MAC_STATUS,
4145                      (MAC_STATUS_SYNC_CHANGED |
4146                       MAC_STATUS_CFG_CHANGED));
4147                 udelay(40);
4148                 tg3_write_mem(tp,
4149                               NIC_SRAM_FIRMWARE_MBOX,
4150                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4151         }
4152
4153         /* Prevent send BD corruption. */
4154         if (tg3_flag(tp, CLKREQ_BUG)) {
4155                 u16 oldlnkctl, newlnkctl;
4156
4157                 pci_read_config_word(tp->pdev,
4158                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4159                                      &oldlnkctl);
4160                 if (tp->link_config.active_speed == SPEED_100 ||
4161                     tp->link_config.active_speed == SPEED_10)
4162                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4163                 else
4164                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4165                 if (newlnkctl != oldlnkctl)
4166                         pci_write_config_word(tp->pdev,
4167                                               pci_pcie_cap(tp->pdev) +
4168                                               PCI_EXP_LNKCTL, newlnkctl);
4169         }
4170
4171         if (current_link_up != netif_carrier_ok(tp->dev)) {
4172                 if (current_link_up)
4173                         netif_carrier_on(tp->dev);
4174                 else
4175                         netif_carrier_off(tp->dev);
4176                 tg3_link_report(tp);
4177         }
4178
4179         return 0;
4180 }
4181
4182 struct tg3_fiber_aneginfo {
4183         int state;
4184 #define ANEG_STATE_UNKNOWN              0
4185 #define ANEG_STATE_AN_ENABLE            1
4186 #define ANEG_STATE_RESTART_INIT         2
4187 #define ANEG_STATE_RESTART              3
4188 #define ANEG_STATE_DISABLE_LINK_OK      4
4189 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4190 #define ANEG_STATE_ABILITY_DETECT       6
4191 #define ANEG_STATE_ACK_DETECT_INIT      7
4192 #define ANEG_STATE_ACK_DETECT           8
4193 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4194 #define ANEG_STATE_COMPLETE_ACK         10
4195 #define ANEG_STATE_IDLE_DETECT_INIT     11
4196 #define ANEG_STATE_IDLE_DETECT          12
4197 #define ANEG_STATE_LINK_OK              13
4198 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4199 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4200
4201         u32 flags;
4202 #define MR_AN_ENABLE            0x00000001
4203 #define MR_RESTART_AN           0x00000002
4204 #define MR_AN_COMPLETE          0x00000004
4205 #define MR_PAGE_RX              0x00000008
4206 #define MR_NP_LOADED            0x00000010
4207 #define MR_TOGGLE_TX            0x00000020
4208 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4209 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4210 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4211 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4212 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4213 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4214 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4215 #define MR_TOGGLE_RX            0x00002000
4216 #define MR_NP_RX                0x00004000
4217
4218 #define MR_LINK_OK              0x80000000
4219
4220         unsigned long link_time, cur_time;
4221
4222         u32 ability_match_cfg;
4223         int ability_match_count;
4224
4225         char ability_match, idle_match, ack_match;
4226
4227         u32 txconfig, rxconfig;
4228 #define ANEG_CFG_NP             0x00000080
4229 #define ANEG_CFG_ACK            0x00000040
4230 #define ANEG_CFG_RF2            0x00000020
4231 #define ANEG_CFG_RF1            0x00000010
4232 #define ANEG_CFG_PS2            0x00000001
4233 #define ANEG_CFG_PS1            0x00008000
4234 #define ANEG_CFG_HD             0x00004000
4235 #define ANEG_CFG_FD             0x00002000
4236 #define ANEG_CFG_INVAL          0x00001f06
4237
4238 };
4239 #define ANEG_OK         0
4240 #define ANEG_DONE       1
4241 #define ANEG_TIMER_ENAB 2
4242 #define ANEG_FAILED     -1
4243
4244 #define ANEG_STATE_SETTLE_TIME  10000
4245
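/* Per the state constants above, a successful fiber autoneg walks
 * UNKNOWN -> AN_ENABLE -> RESTART_INIT -> RESTART ->
 * ABILITY_DETECT_INIT -> ABILITY_DETECT -> ACK_DETECT_INIT ->
 * ACK_DETECT -> COMPLETE_ACK_INIT -> COMPLETE_ACK ->
 * IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK; the NEXT_PAGE_WAIT
 * states are reserved for next-page exchanges.
 */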
4246 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4247                                    struct tg3_fiber_aneginfo *ap)
4248 {
4249         u16 flowctrl;
4250         unsigned long delta;
4251         u32 rx_cfg_reg;
4252         int ret;
4253
4254         if (ap->state == ANEG_STATE_UNKNOWN) {
4255                 ap->rxconfig = 0;
4256                 ap->link_time = 0;
4257                 ap->cur_time = 0;
4258                 ap->ability_match_cfg = 0;
4259                 ap->ability_match_count = 0;
4260                 ap->ability_match = 0;
4261                 ap->idle_match = 0;
4262                 ap->ack_match = 0;
4263         }
4264         ap->cur_time++;
4265
4266         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4267                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4268
4269                 if (rx_cfg_reg != ap->ability_match_cfg) {
4270                         ap->ability_match_cfg = rx_cfg_reg;
4271                         ap->ability_match = 0;
4272                         ap->ability_match_count = 0;
4273                 } else {
4274                         if (++ap->ability_match_count > 1) {
4275                                 ap->ability_match = 1;
4276                                 ap->ability_match_cfg = rx_cfg_reg;
4277                         }
4278                 }
4279                 if (rx_cfg_reg & ANEG_CFG_ACK)
4280                         ap->ack_match = 1;
4281                 else
4282                         ap->ack_match = 0;
4283
4284                 ap->idle_match = 0;
4285         } else {
4286                 ap->idle_match = 1;
4287                 ap->ability_match_cfg = 0;
4288                 ap->ability_match_count = 0;
4289                 ap->ability_match = 0;
4290                 ap->ack_match = 0;
4291
4292                 rx_cfg_reg = 0;
4293         }
4294
4295         ap->rxconfig = rx_cfg_reg;
4296         ret = ANEG_OK;
4297
4298         switch (ap->state) {
4299         case ANEG_STATE_UNKNOWN:
4300                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4301                         ap->state = ANEG_STATE_AN_ENABLE;
4302
4303                 /* fallthru */
4304         case ANEG_STATE_AN_ENABLE:
4305                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4306                 if (ap->flags & MR_AN_ENABLE) {
4307                         ap->link_time = 0;
4308                         ap->cur_time = 0;
4309                         ap->ability_match_cfg = 0;
4310                         ap->ability_match_count = 0;
4311                         ap->ability_match = 0;
4312                         ap->idle_match = 0;
4313                         ap->ack_match = 0;
4314
4315                         ap->state = ANEG_STATE_RESTART_INIT;
4316                 } else {
4317                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4318                 }
4319                 break;
4320
4321         case ANEG_STATE_RESTART_INIT:
4322                 ap->link_time = ap->cur_time;
4323                 ap->flags &= ~(MR_NP_LOADED);
4324                 ap->txconfig = 0;
4325                 tw32(MAC_TX_AUTO_NEG, 0);
4326                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4327                 tw32_f(MAC_MODE, tp->mac_mode);
4328                 udelay(40);
4329
4330                 ret = ANEG_TIMER_ENAB;
4331                 ap->state = ANEG_STATE_RESTART;
4332
4333                 /* fallthru */
4334         case ANEG_STATE_RESTART:
4335                 delta = ap->cur_time - ap->link_time;
4336                 if (delta > ANEG_STATE_SETTLE_TIME)
4337                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4338                 else
4339                         ret = ANEG_TIMER_ENAB;
4340                 break;
4341
4342         case ANEG_STATE_DISABLE_LINK_OK:
4343                 ret = ANEG_DONE;
4344                 break;
4345
4346         case ANEG_STATE_ABILITY_DETECT_INIT:
4347                 ap->flags &= ~(MR_TOGGLE_TX);
4348                 ap->txconfig = ANEG_CFG_FD;
4349                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4350                 if (flowctrl & ADVERTISE_1000XPAUSE)
4351                         ap->txconfig |= ANEG_CFG_PS1;
4352                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4353                         ap->txconfig |= ANEG_CFG_PS2;
4354                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4355                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4356                 tw32_f(MAC_MODE, tp->mac_mode);
4357                 udelay(40);
4358
4359                 ap->state = ANEG_STATE_ABILITY_DETECT;
4360                 break;
4361
4362         case ANEG_STATE_ABILITY_DETECT:
4363                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4364                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4365                 break;
4366
4367         case ANEG_STATE_ACK_DETECT_INIT:
4368                 ap->txconfig |= ANEG_CFG_ACK;
4369                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4370                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4371                 tw32_f(MAC_MODE, tp->mac_mode);
4372                 udelay(40);
4373
4374                 ap->state = ANEG_STATE_ACK_DETECT;
4375
4376                 /* fallthru */
4377         case ANEG_STATE_ACK_DETECT:
4378                 if (ap->ack_match != 0) {
4379                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4380                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4381                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4382                         } else {
4383                                 ap->state = ANEG_STATE_AN_ENABLE;
4384                         }
4385                 } else if (ap->ability_match != 0 &&
4386                            ap->rxconfig == 0) {
4387                         ap->state = ANEG_STATE_AN_ENABLE;
4388                 }
4389                 break;
4390
4391         case ANEG_STATE_COMPLETE_ACK_INIT:
4392                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4393                         ret = ANEG_FAILED;
4394                         break;
4395                 }
4396                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4397                                MR_LP_ADV_HALF_DUPLEX |
4398                                MR_LP_ADV_SYM_PAUSE |
4399                                MR_LP_ADV_ASYM_PAUSE |
4400                                MR_LP_ADV_REMOTE_FAULT1 |
4401                                MR_LP_ADV_REMOTE_FAULT2 |
4402                                MR_LP_ADV_NEXT_PAGE |
4403                                MR_TOGGLE_RX |
4404                                MR_NP_RX);
4405                 if (ap->rxconfig & ANEG_CFG_FD)
4406                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4407                 if (ap->rxconfig & ANEG_CFG_HD)
4408                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4409                 if (ap->rxconfig & ANEG_CFG_PS1)
4410                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4411                 if (ap->rxconfig & ANEG_CFG_PS2)
4412                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4413                 if (ap->rxconfig & ANEG_CFG_RF1)
4414                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4415                 if (ap->rxconfig & ANEG_CFG_RF2)
4416                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4417                 if (ap->rxconfig & ANEG_CFG_NP)
4418                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4419
4420                 ap->link_time = ap->cur_time;
4421
4422                 ap->flags ^= (MR_TOGGLE_TX);
4423                 if (ap->rxconfig & 0x0008)
4424                         ap->flags |= MR_TOGGLE_RX;
4425                 if (ap->rxconfig & ANEG_CFG_NP)
4426                         ap->flags |= MR_NP_RX;
4427                 ap->flags |= MR_PAGE_RX;
4428
4429                 ap->state = ANEG_STATE_COMPLETE_ACK;
4430                 ret = ANEG_TIMER_ENAB;
4431                 break;
4432
4433         case ANEG_STATE_COMPLETE_ACK:
4434                 if (ap->ability_match != 0 &&
4435                     ap->rxconfig == 0) {
4436                         ap->state = ANEG_STATE_AN_ENABLE;
4437                         break;
4438                 }
4439                 delta = ap->cur_time - ap->link_time;
4440                 if (delta > ANEG_STATE_SETTLE_TIME) {
4441                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4442                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4443                         } else {
4444                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4445                                     !(ap->flags & MR_NP_RX)) {
4446                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4447                                 } else {
4448                                         ret = ANEG_FAILED;
4449                                 }
4450                         }
4451                 }
4452                 break;
4453
4454         case ANEG_STATE_IDLE_DETECT_INIT:
4455                 ap->link_time = ap->cur_time;
4456                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4457                 tw32_f(MAC_MODE, tp->mac_mode);
4458                 udelay(40);
4459
4460                 ap->state = ANEG_STATE_IDLE_DETECT;
4461                 ret = ANEG_TIMER_ENAB;
4462                 break;
4463
4464         case ANEG_STATE_IDLE_DETECT:
4465                 if (ap->ability_match != 0 &&
4466                     ap->rxconfig == 0) {
4467                         ap->state = ANEG_STATE_AN_ENABLE;
4468                         break;
4469                 }
4470                 delta = ap->cur_time - ap->link_time;
4471                 if (delta > ANEG_STATE_SETTLE_TIME) {
4472                         /* XXX another gem from the Broadcom driver :( */
4473                         ap->state = ANEG_STATE_LINK_OK;
4474                 }
4475                 break;
4476
4477         case ANEG_STATE_LINK_OK:
4478                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4479                 ret = ANEG_DONE;
4480                 break;
4481
4482         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4483                 /* ??? unimplemented */
4484                 break;
4485
4486         case ANEG_STATE_NEXT_PAGE_WAIT:
4487                 /* ??? unimplemented */
4488                 break;
4489
4490         default:
4491                 ret = ANEG_FAILED;
4492                 break;
4493         }
4494
4495         return ret;
4496 }
4497
4498 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4499 {
4500         int res = 0;
4501         struct tg3_fiber_aneginfo aninfo;
4502         int status = ANEG_FAILED;
4503         unsigned int tick;
4504         u32 tmp;
4505
4506         tw32_f(MAC_TX_AUTO_NEG, 0);
4507
4508         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4509         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4510         udelay(40);
4511
4512         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4513         udelay(40);
4514
4515         memset(&aninfo, 0, sizeof(aninfo));
4516         aninfo.flags |= MR_AN_ENABLE;
4517         aninfo.state = ANEG_STATE_UNKNOWN;
4518         aninfo.cur_time = 0;
4519         tick = 0;
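        /* Give the software state machine up to roughly 195 ms to
         * finish: 195000 iterations of udelay(1), plus whatever time
         * each pass through the machine itself consumes.
         */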
4520         while (++tick < 195000) {
4521                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4522                 if (status == ANEG_DONE || status == ANEG_FAILED)
4523                         break;
4524
4525                 udelay(1);
4526         }
4527
4528         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4529         tw32_f(MAC_MODE, tp->mac_mode);
4530         udelay(40);
4531
4532         *txflags = aninfo.txconfig;
4533         *rxflags = aninfo.flags;
4534
4535         if (status == ANEG_DONE &&
4536             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4537                              MR_LP_ADV_FULL_DUPLEX)))
4538                 res = 1;
4539
4540         return res;
4541 }
4542
4543 static void tg3_init_bcm8002(struct tg3 *tp)
4544 {
4545         u32 mac_status = tr32(MAC_STATUS);
4546         int i;
4547
4548         /* Reset on first-time init or when we have a link. */
4549         if (tg3_flag(tp, INIT_COMPLETE) &&
4550             !(mac_status & MAC_STATUS_PCS_SYNCED))
4551                 return;
4552
4553         /* Set PLL lock range. */
4554         tg3_writephy(tp, 0x16, 0x8007);
4555
4556         /* SW reset */
4557         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4558
4559         /* Wait for reset to complete. */
4560         /* XXX schedule_timeout() ... */
4561         for (i = 0; i < 500; i++)
4562                 udelay(10);
4563
4564         /* Config mode; select PMA/Ch 1 regs. */
4565         tg3_writephy(tp, 0x10, 0x8411);
4566
4567         /* Enable auto-lock and comdet, select txclk for tx. */
4568         tg3_writephy(tp, 0x11, 0x0a10);
4569
4570         tg3_writephy(tp, 0x18, 0x00a0);
4571         tg3_writephy(tp, 0x16, 0x41ff);
4572
4573         /* Assert and deassert POR. */
4574         tg3_writephy(tp, 0x13, 0x0400);
4575         udelay(40);
4576         tg3_writephy(tp, 0x13, 0x0000);
4577
4578         tg3_writephy(tp, 0x11, 0x0a50);
4579         udelay(40);
4580         tg3_writephy(tp, 0x11, 0x0a10);
4581
4582         /* Wait for signal to stabilize */
4583         /* XXX schedule_timeout() ... */
4584         for (i = 0; i < 15000; i++)
4585                 udelay(10);
4586
4587         /* Deselect the channel register so we can read the PHYID
4588          * later.
4589          */
4590         tg3_writephy(tp, 0x10, 0x8011);
4591 }
4592
4593 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4594 {
4595         u16 flowctrl;
4596         u32 sg_dig_ctrl, sg_dig_status;
4597         u32 serdes_cfg, expected_sg_dig_ctrl;
4598         int workaround, port_a;
4599         int current_link_up;
4600
4601         serdes_cfg = 0;
4602         expected_sg_dig_ctrl = 0;
4603         workaround = 0;
4604         port_a = 1;
4605         current_link_up = 0;
4606
4607         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4608             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4609                 workaround = 1;
4610                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4611                         port_a = 0;
4612
4613                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4614                 /* preserve bits 20-23 for voltage regulator */
4615                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4616         }
4617
4618         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4619
4620         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4621                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4622                         if (workaround) {
4623                                 u32 val = serdes_cfg;
4624
4625                                 if (port_a)
4626                                         val |= 0xc010000;
4627                                 else
4628                                         val |= 0x4010000;
4629                                 tw32_f(MAC_SERDES_CFG, val);
4630                         }
4631
4632                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4633                 }
4634                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4635                         tg3_setup_flow_control(tp, 0, 0);
4636                         current_link_up = 1;
4637                 }
4638                 goto out;
4639         }
4640
4641         /* Want auto-negotiation.  */
4642         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4643
4644         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4645         if (flowctrl & ADVERTISE_1000XPAUSE)
4646                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4647         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4648                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4649
4650         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4651                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4652                     tp->serdes_counter &&
4653                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4654                                     MAC_STATUS_RCVD_CFG)) ==
4655                      MAC_STATUS_PCS_SYNCED)) {
4656                         tp->serdes_counter--;
4657                         current_link_up = 1;
4658                         goto out;
4659                 }
4660 restart_autoneg:
4661                 if (workaround)
4662                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4663                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4664                 udelay(5);
4665                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4666
4667                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4668                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4669         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4670                                  MAC_STATUS_SIGNAL_DET)) {
4671                 sg_dig_status = tr32(SG_DIG_STATUS);
4672                 mac_status = tr32(MAC_STATUS);
4673
4674                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4675                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4676                         u32 local_adv = 0, remote_adv = 0;
4677
4678                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4679                                 local_adv |= ADVERTISE_1000XPAUSE;
4680                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4681                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4682
4683                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4684                                 remote_adv |= LPA_1000XPAUSE;
4685                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4686                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4687
4688                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4689                         current_link_up = 1;
4690                         tp->serdes_counter = 0;
4691                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4692                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4693                         if (tp->serdes_counter)
4694                                 tp->serdes_counter--;
4695                         else {
4696                                 if (workaround) {
4697                                         u32 val = serdes_cfg;
4698
4699                                         if (port_a)
4700                                                 val |= 0xc010000;
4701                                         else
4702                                                 val |= 0x4010000;
4703
4704                                         tw32_f(MAC_SERDES_CFG, val);
4705                                 }
4706
4707                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4708                                 udelay(40);
4709
4710                                 /* Link parallel detection - the link is up
4711                                  * only if we have PCS_SYNC and are not
4712                                  * receiving config code words. */
4713                                 mac_status = tr32(MAC_STATUS);
4714                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4715                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4716                                         tg3_setup_flow_control(tp, 0, 0);
4717                                         current_link_up = 1;
4718                                         tp->phy_flags |=
4719                                                 TG3_PHYFLG_PARALLEL_DETECT;
4720                                         tp->serdes_counter =
4721                                                 SERDES_PARALLEL_DET_TIMEOUT;
4722                                 } else
4723                                         goto restart_autoneg;
4724                         }
4725                 }
4726         } else {
4727                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4728                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4729         }
4730
4731 out:
4732         return current_link_up;
4733 }
4734
4735 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4736 {
4737         int current_link_up = 0;
4738
4739         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4740                 goto out;
4741
4742         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4743                 u32 txflags, rxflags;
4744                 int i;
4745
4746                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4747                         u32 local_adv = 0, remote_adv = 0;
4748
4749                         if (txflags & ANEG_CFG_PS1)
4750                                 local_adv |= ADVERTISE_1000XPAUSE;
4751                         if (txflags & ANEG_CFG_PS2)
4752                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4753
4754                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4755                                 remote_adv |= LPA_1000XPAUSE;
4756                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4757                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4758
4759                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4760
4761                         current_link_up = 1;
4762                 }
4763                 for (i = 0; i < 30; i++) {
4764                         udelay(20);
4765                         tw32_f(MAC_STATUS,
4766                                (MAC_STATUS_SYNC_CHANGED |
4767                                 MAC_STATUS_CFG_CHANGED));
4768                         udelay(40);
4769                         if ((tr32(MAC_STATUS) &
4770                              (MAC_STATUS_SYNC_CHANGED |
4771                               MAC_STATUS_CFG_CHANGED)) == 0)
4772                                 break;
4773                 }
4774
4775                 mac_status = tr32(MAC_STATUS);
4776                 if (current_link_up == 0 &&
4777                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4778                     !(mac_status & MAC_STATUS_RCVD_CFG))
4779                         current_link_up = 1;
4780         } else {
4781                 tg3_setup_flow_control(tp, 0, 0);
4782
4783                 /* Forcing 1000FD link up. */
4784                 current_link_up = 1;
4785
4786                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4787                 udelay(40);
4788
4789                 tw32_f(MAC_MODE, tp->mac_mode);
4790                 udelay(40);
4791         }
4792
4793 out:
4794         return current_link_up;
4795 }
4796
4797 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4798 {
4799         u32 orig_pause_cfg;
4800         u16 orig_active_speed;
4801         u8 orig_active_duplex;
4802         u32 mac_status;
4803         int current_link_up;
4804         int i;
4805
4806         orig_pause_cfg = tp->link_config.active_flowctrl;
4807         orig_active_speed = tp->link_config.active_speed;
4808         orig_active_duplex = tp->link_config.active_duplex;
4809
4810         if (!tg3_flag(tp, HW_AUTONEG) &&
4811             netif_carrier_ok(tp->dev) &&
4812             tg3_flag(tp, INIT_COMPLETE)) {
4813                 mac_status = tr32(MAC_STATUS);
4814                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4815                                MAC_STATUS_SIGNAL_DET |
4816                                MAC_STATUS_CFG_CHANGED |
4817                                MAC_STATUS_RCVD_CFG);
4818                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4819                                    MAC_STATUS_SIGNAL_DET)) {
4820                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4821                                             MAC_STATUS_CFG_CHANGED));
4822                         return 0;
4823                 }
4824         }
4825
4826         tw32_f(MAC_TX_AUTO_NEG, 0);
4827
4828         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4829         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4830         tw32_f(MAC_MODE, tp->mac_mode);
4831         udelay(40);
4832
4833         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4834                 tg3_init_bcm8002(tp);
4835
4836         /* Enable link change events even while polling the serdes.  */
4837         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4838         udelay(40);
4839
4840         current_link_up = 0;
4841         mac_status = tr32(MAC_STATUS);
4842
4843         if (tg3_flag(tp, HW_AUTONEG))
4844                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4845         else
4846                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4847
4848         tp->napi[0].hw_status->status =
4849                 (SD_STATUS_UPDATED |
4850                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4851
4852         for (i = 0; i < 100; i++) {
4853                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4854                                     MAC_STATUS_CFG_CHANGED));
4855                 udelay(5);
4856                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4857                                          MAC_STATUS_CFG_CHANGED |
4858                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4859                         break;
4860         }
4861
4862         mac_status = tr32(MAC_STATUS);
4863         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4864                 current_link_up = 0;
4865                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4866                     tp->serdes_counter == 0) {
4867                         tw32_f(MAC_MODE, (tp->mac_mode |
4868                                           MAC_MODE_SEND_CONFIGS));
4869                         udelay(1);
4870                         tw32_f(MAC_MODE, tp->mac_mode);
4871                 }
4872         }
4873
4874         if (current_link_up == 1) {
4875                 tp->link_config.active_speed = SPEED_1000;
4876                 tp->link_config.active_duplex = DUPLEX_FULL;
4877                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4878                                     LED_CTRL_LNKLED_OVERRIDE |
4879                                     LED_CTRL_1000MBPS_ON));
4880         } else {
4881                 tp->link_config.active_speed = SPEED_INVALID;
4882                 tp->link_config.active_duplex = DUPLEX_INVALID;
4883                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4884                                     LED_CTRL_LNKLED_OVERRIDE |
4885                                     LED_CTRL_TRAFFIC_OVERRIDE));
4886         }
4887
4888         if (current_link_up != netif_carrier_ok(tp->dev)) {
4889                 if (current_link_up)
4890                         netif_carrier_on(tp->dev);
4891                 else
4892                         netif_carrier_off(tp->dev);
4893                 tg3_link_report(tp);
4894         } else {
4895                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4896                 if (orig_pause_cfg != now_pause_cfg ||
4897                     orig_active_speed != tp->link_config.active_speed ||
4898                     orig_active_duplex != tp->link_config.active_duplex)
4899                         tg3_link_report(tp);
4900         }
4901
4902         return 0;
4903 }
4904
4905 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4906 {
4907         int current_link_up, err = 0;
4908         u32 bmsr, bmcr;
4909         u16 current_speed;
4910         u8 current_duplex;
4911         u32 local_adv, remote_adv;
4912
4913         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4914         tw32_f(MAC_MODE, tp->mac_mode);
4915         udelay(40);
4916
4917         tw32(MAC_EVENT, 0);
4918
4919         tw32_f(MAC_STATUS,
4920              (MAC_STATUS_SYNC_CHANGED |
4921               MAC_STATUS_CFG_CHANGED |
4922               MAC_STATUS_MI_COMPLETION |
4923               MAC_STATUS_LNKSTATE_CHANGED));
4924         udelay(40);
4925
4926         if (force_reset)
4927                 tg3_phy_reset(tp);
4928
4929         current_link_up = 0;
4930         current_speed = SPEED_INVALID;
4931         current_duplex = DUPLEX_INVALID;
4932
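        /* Link status in BMSR is latched-low, so read the register
         * twice to get the current state rather than a stale value.
         */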
4933         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4934         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4936                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4937                         bmsr |= BMSR_LSTATUS;
4938                 else
4939                         bmsr &= ~BMSR_LSTATUS;
4940         }
4941
4942         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4943
4944         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4945             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4946                 /* do nothing, just check for link up at the end */
4947         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4948                 u32 adv, new_adv;
4949
4950                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4951                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4952                                   ADVERTISE_1000XPAUSE |
4953                                   ADVERTISE_1000XPSE_ASYM |
4954                                   ADVERTISE_SLCT);
4955
4956                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4957
4958                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4959                         new_adv |= ADVERTISE_1000XHALF;
4960                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4961                         new_adv |= ADVERTISE_1000XFULL;
4962
4963                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4964                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4965                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4966                         tg3_writephy(tp, MII_BMCR, bmcr);
4967
4968                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4969                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4970                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4971
4972                         return err;
4973                 }
4974         } else {
4975                 u32 new_bmcr;
4976
4977                 bmcr &= ~BMCR_SPEED1000;
4978                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4979
4980                 if (tp->link_config.duplex == DUPLEX_FULL)
4981                         new_bmcr |= BMCR_FULLDPLX;
4982
4983                 if (new_bmcr != bmcr) {
4984                         /* BMCR_SPEED1000 is a reserved bit that needs
4985                          * to be set on write.
4986                          */
4987                         new_bmcr |= BMCR_SPEED1000;
4988
4989                         /* Force a linkdown */
4990                         if (netif_carrier_ok(tp->dev)) {
4991                                 u32 adv;
4992
4993                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4994                                 adv &= ~(ADVERTISE_1000XFULL |
4995                                          ADVERTISE_1000XHALF |
4996                                          ADVERTISE_SLCT);
4997                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4998                                 tg3_writephy(tp, MII_BMCR, bmcr |
4999                                                            BMCR_ANRESTART |
5000                                                            BMCR_ANENABLE);
5001                                 udelay(10);
5002                                 netif_carrier_off(tp->dev);
5003                         }
5004                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5005                         bmcr = new_bmcr;
5006                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5007                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5008                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5009                             ASIC_REV_5714) {
5010                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5011                                         bmsr |= BMSR_LSTATUS;
5012                                 else
5013                                         bmsr &= ~BMSR_LSTATUS;
5014                         }
5015                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5016                 }
5017         }
5018
5019         if (bmsr & BMSR_LSTATUS) {
5020                 current_speed = SPEED_1000;
5021                 current_link_up = 1;
5022                 if (bmcr & BMCR_FULLDPLX)
5023                         current_duplex = DUPLEX_FULL;
5024                 else
5025                         current_duplex = DUPLEX_HALF;
5026
5027                 local_adv = 0;
5028                 remote_adv = 0;
5029
5030                 if (bmcr & BMCR_ANENABLE) {
5031                         u32 common;
5032
5033                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5034                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5035                         common = local_adv & remote_adv;
5036                         if (common & (ADVERTISE_1000XHALF |
5037                                       ADVERTISE_1000XFULL)) {
5038                                 if (common & ADVERTISE_1000XFULL)
5039                                         current_duplex = DUPLEX_FULL;
5040                                 else
5041                                         current_duplex = DUPLEX_HALF;
5042                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5043                                 /* Link is up via parallel detect */
5044                         } else {
5045                                 current_link_up = 0;
5046                         }
5047                 }
5048         }
5049
5050         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5051                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5052
5053         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5054         if (tp->link_config.active_duplex == DUPLEX_HALF)
5055                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5056
5057         tw32_f(MAC_MODE, tp->mac_mode);
5058         udelay(40);
5059
5060         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5061
5062         tp->link_config.active_speed = current_speed;
5063         tp->link_config.active_duplex = current_duplex;
5064
5065         if (current_link_up != netif_carrier_ok(tp->dev)) {
5066                 if (current_link_up)
5067                         netif_carrier_on(tp->dev);
5068                 else {
5069                         netif_carrier_off(tp->dev);
5070                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5071                 }
5072                 tg3_link_report(tp);
5073         }
5074         return err;
5075 }
5076
5077 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5078 {
5079         if (tp->serdes_counter) {
5080                 /* Give autoneg time to complete. */
5081                 tp->serdes_counter--;
5082                 return;
5083         }
5084
5085         if (!netif_carrier_ok(tp->dev) &&
5086             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5087                 u32 bmcr;
5088
5089                 tg3_readphy(tp, MII_BMCR, &bmcr);
5090                 if (bmcr & BMCR_ANENABLE) {
5091                         u32 phy1, phy2;
5092
5093                         /* Select shadow register 0x1f */
5094                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5095                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5096
5097                         /* Select expansion interrupt status register */
5098                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5099                                          MII_TG3_DSP_EXP1_INT_STAT);
5100                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5101                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5102
5103                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5104                                 /* We have signal detect and are not receiving
5105                                  * config code words, so the link is up by
5106                                  * parallel detection.
5107                                  */
5108
5109                                 bmcr &= ~BMCR_ANENABLE;
5110                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5111                                 tg3_writephy(tp, MII_BMCR, bmcr);
5112                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5113                         }
5114                 }
5115         } else if (netif_carrier_ok(tp->dev) &&
5116                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5117                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5118                 u32 phy2;
5119
5120                 /* Select expansion interrupt status register */
5121                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5122                                  MII_TG3_DSP_EXP1_INT_STAT);
5123                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5124                 if (phy2 & 0x20) {
5125                         u32 bmcr;
5126
5127                         /* Config code words received, turn on autoneg. */
5128                         tg3_readphy(tp, MII_BMCR, &bmcr);
5129                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5130
5131                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5132
5133                 }
5134         }
5135 }
5136
5137 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5138 {
5139         u32 val;
5140         int err;
5141
5142         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5143                 err = tg3_setup_fiber_phy(tp, force_reset);
5144         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5145                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5146         else
5147                 err = tg3_setup_copper_phy(tp, force_reset);
5148
5149         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5150                 u32 scale;
5151
5152                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5153                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5154                         scale = 65;
5155                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5156                         scale = 6;
5157                 else
5158                         scale = 12;
5159
5160                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5161                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5162                 tw32(GRC_MISC_CFG, val);
5163         }
5164
5165         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5166               (6 << TX_LENGTHS_IPG_SHIFT);
5167         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5168                 val |= tr32(MAC_TX_LENGTHS) &
5169                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5170                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5171
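        /* 1000 Mb/s half duplex presumably needs the longer slot time
         * because of 802.3 carrier extension (a 512-byte slot); every
         * other speed/duplex combination uses the standard 32.
         */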
5172         if (tp->link_config.active_speed == SPEED_1000 &&
5173             tp->link_config.active_duplex == DUPLEX_HALF)
5174                 tw32(MAC_TX_LENGTHS, val |
5175                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5176         else
5177                 tw32(MAC_TX_LENGTHS, val |
5178                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5179
5180         if (!tg3_flag(tp, 5705_PLUS)) {
5181                 if (netif_carrier_ok(tp->dev)) {
5182                         tw32(HOSTCC_STAT_COAL_TICKS,
5183                              tp->coal.stats_block_coalesce_usecs);
5184                 } else {
5185                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5186                 }
5187         }
5188
5189         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5190                 val = tr32(PCIE_PWR_MGMT_THRESH);
5191                 if (!netif_carrier_ok(tp->dev))
5192                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5193                               tp->pwrmgmt_thresh;
5194                 else
5195                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5196                 tw32(PCIE_PWR_MGMT_THRESH, val);
5197         }
5198
5199         return err;
5200 }
5201
5202 static inline int tg3_irq_sync(struct tg3 *tp)
5203 {
5204         return tp->irq_sync;
5205 }
5206
5207 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5208 {
5209         int i;
5210
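        /* Offset the destination pointer by 'off' so that each word's
         * index in the dump buffer matches the register offset it was
         * read from.
         */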
5211         dst = (u32 *)((u8 *)dst + off);
5212         for (i = 0; i < len; i += sizeof(u32))
5213                 *dst++ = tr32(off + i);
5214 }
5215
5216 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5217 {
5218         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5219         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5220         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5221         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5222         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5223         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5224         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5225         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5226         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5227         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5228         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5229         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5230         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5231         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5232         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5233         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5234         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5235         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5236         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5237
5238         if (tg3_flag(tp, SUPPORT_MSIX))
5239                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5240
5241         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5242         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5243         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5244         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5245         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5246         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5247         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5248         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5249
5250         if (!tg3_flag(tp, 5705_PLUS)) {
5251                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5252                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5253                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5254         }
5255
5256         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5257         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5258         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5259         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5260         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5261
5262         if (tg3_flag(tp, NVRAM))
5263                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5264 }
5265
5266 static void tg3_dump_state(struct tg3 *tp)
5267 {
5268         int i;
5269         u32 *regs;
5270
5271         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5272         if (!regs) {
5273                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5274                 return;
5275         }
5276
5277         if (tg3_flag(tp, PCI_EXPRESS)) {
5278                 /* Read up to but not including private PCI registers */
5279                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5280                         regs[i / sizeof(u32)] = tr32(i);
5281         } else
5282                 tg3_dump_legacy_regs(tp, regs);
5283
5284         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5285                 if (!regs[i + 0] && !regs[i + 1] &&
5286                     !regs[i + 2] && !regs[i + 3])
5287                         continue;
5288
5289                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5290                            i * 4,
5291                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5292         }
5293
5294         kfree(regs);
5295
5296         for (i = 0; i < tp->irq_cnt; i++) {
5297                 struct tg3_napi *tnapi = &tp->napi[i];
5298
5299                 /* SW status block */
5300                 netdev_err(tp->dev,
5301                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5302                            i,
5303                            tnapi->hw_status->status,
5304                            tnapi->hw_status->status_tag,
5305                            tnapi->hw_status->rx_jumbo_consumer,
5306                            tnapi->hw_status->rx_consumer,
5307                            tnapi->hw_status->rx_mini_consumer,
5308                            tnapi->hw_status->idx[0].rx_producer,
5309                            tnapi->hw_status->idx[0].tx_consumer);
5310
5311                 netdev_err(tp->dev,
5312                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5313                            i,
5314                            tnapi->last_tag, tnapi->last_irq_tag,
5315                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5316                            tnapi->rx_rcb_ptr,
5317                            tnapi->prodring.rx_std_prod_idx,
5318                            tnapi->prodring.rx_std_cons_idx,
5319                            tnapi->prodring.rx_jmb_prod_idx,
5320                            tnapi->prodring.rx_jmb_cons_idx);
5321         }
5322 }
5323
5324 /* This is called whenever we suspect that the system chipset is re-
5325  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5326  * is bogus tx completions. We try to recover by setting the
5327  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5328  * in the workqueue.
5329  */
5330 static void tg3_tx_recover(struct tg3 *tp)
5331 {
5332         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5333                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5334
5335         netdev_warn(tp->dev,
5336                     "The system may be re-ordering memory-mapped I/O "
5337                     "cycles to the network device, attempting to recover. "
5338                     "Please report the problem to the driver maintainer "
5339                     "and include system chipset information.\n");
5340
5341         spin_lock(&tp->lock);
5342         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5343         spin_unlock(&tp->lock);
5344 }
5345
5346 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5347 {
5348         /* Tell compiler to fetch tx indices from memory. */
5349         barrier();
5350         return tnapi->tx_pending -
5351                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5352 }
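
/* A worked example of the arithmetic above, assuming the usual
 * power-of-two TG3_TX_RING_SIZE of 512:
 *
 *   tx_prod = 5, tx_cons = 510
 *   in flight = (5 - 510) & 511 = 7
 *   available = tx_pending - 7
 *
 * The unsigned subtraction wraps modulo 2^32, so masking with
 * (ring size - 1) gives the correct in-flight count even after
 * tx_prod wraps past the end of the ring.
 */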
5353
5354 /* Tigon3 never reports partial packet sends.  So we do not
5355  * need special logic to handle SKBs that have not had all
5356  * of their frags sent yet, like SunGEM does.
5357  */
5358 static void tg3_tx(struct tg3_napi *tnapi)
5359 {
5360         struct tg3 *tp = tnapi->tp;
5361         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5362         u32 sw_idx = tnapi->tx_cons;
5363         struct netdev_queue *txq;
5364         int index = tnapi - tp->napi;
5365
5366         if (tg3_flag(tp, ENABLE_TSS))
5367                 index--;
5368
5369         txq = netdev_get_tx_queue(tp->dev, index);
5370
5371         while (sw_idx != hw_idx) {
5372                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5373                 struct sk_buff *skb = ri->skb;
5374                 int i, tx_bug = 0;
5375
5376                 if (unlikely(skb == NULL)) {
5377                         tg3_tx_recover(tp);
5378                         return;
5379                 }
5380
5381                 pci_unmap_single(tp->pdev,
5382                                  dma_unmap_addr(ri, mapping),
5383                                  skb_headlen(skb),
5384                                  PCI_DMA_TODEVICE);
5385
5386                 ri->skb = NULL;
5387
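                /* Entries marked fragmented are extra descriptors the
                 * xmit path had to emit for this buffer (e.g. to work
                 * around DMA restrictions); skip past them too.
                 */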
5388                 while (ri->fragmented) {
5389                         ri->fragmented = false;
5390                         sw_idx = NEXT_TX(sw_idx);
5391                         ri = &tnapi->tx_buffers[sw_idx];
5392                 }
5393
5394                 sw_idx = NEXT_TX(sw_idx);
5395
5396                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5397                         ri = &tnapi->tx_buffers[sw_idx];
5398                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5399                                 tx_bug = 1;
5400
5401                         pci_unmap_page(tp->pdev,
5402                                        dma_unmap_addr(ri, mapping),
5403                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5404                                        PCI_DMA_TODEVICE);
5405
5406                         while (ri->fragmented) {
5407                                 ri->fragmented = false;
5408                                 sw_idx = NEXT_TX(sw_idx);
5409                                 ri = &tnapi->tx_buffers[sw_idx];
5410                         }
5411
5412                         sw_idx = NEXT_TX(sw_idx);
5413                 }
5414
5415                 dev_kfree_skb(skb);
5416
5417                 if (unlikely(tx_bug)) {
5418                         tg3_tx_recover(tp);
5419                         return;
5420                 }
5421         }
5422
5423         tnapi->tx_cons = sw_idx;
5424
5425         /* Need to make the tx_cons update visible to tg3_start_xmit()
5426          * before checking for netif_queue_stopped().  Without the
5427          * memory barrier, there is a small possibility that tg3_start_xmit()
5428          * will miss it and cause the queue to be stopped forever.
5429          */
5430         smp_mb();
5431
5432         if (unlikely(netif_tx_queue_stopped(txq) &&
5433                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5434                 __netif_tx_lock(txq, smp_processor_id());
5435                 if (netif_tx_queue_stopped(txq) &&
5436                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5437                         netif_tx_wake_queue(txq);
5438                 __netif_tx_unlock(txq);
5439         }
5440 }
5441
5442 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5443 {
5444         if (!ri->skb)
5445                 return;
5446
5447         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5448                          map_sz, PCI_DMA_FROMDEVICE);
5449         dev_kfree_skb_any(ri->skb);
5450         ri->skb = NULL;
5451 }
5452
5453 /* Returns size of skb allocated or < 0 on error.
5454  *
5455  * We only need to fill in the address because the other members
5456  * of the RX descriptor are invariant, see tg3_init_rings.
5457  *
5458  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5459  * posting buffers we only dirty the first cache line of the RX
5460  * descriptor (containing the address).  Whereas for the RX status
5461  * buffers the cpu only reads the last cacheline of the RX descriptor
5462  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5463  */
5464 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5465                             u32 opaque_key, u32 dest_idx_unmasked)
5466 {
5467         struct tg3_rx_buffer_desc *desc;
5468         struct ring_info *map;
5469         struct sk_buff *skb;
5470         dma_addr_t mapping;
5471         int skb_size, dest_idx;
5472
5473         switch (opaque_key) {
5474         case RXD_OPAQUE_RING_STD:
5475                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5476                 desc = &tpr->rx_std[dest_idx];
5477                 map = &tpr->rx_std_buffers[dest_idx];
5478                 skb_size = tp->rx_pkt_map_sz;
5479                 break;
5480
5481         case RXD_OPAQUE_RING_JUMBO:
5482                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5483                 desc = &tpr->rx_jmb[dest_idx].std;
5484                 map = &tpr->rx_jmb_buffers[dest_idx];
5485                 skb_size = TG3_RX_JMB_MAP_SZ;
5486                 break;
5487
5488         default:
5489                 return -EINVAL;
5490         }
5491
5492         /* Do not overwrite any of the map or rp information
5493          * until we are sure we can commit to a new buffer.
5494          *
5495          * Callers depend upon this behavior and assume that
5496          * we leave everything unchanged if we fail.
5497          */
5498         skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
5499         if (skb == NULL)
5500                 return -ENOMEM;
5501
5502         skb_reserve(skb, TG3_RX_OFFSET(tp));
5503
5504         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5505                                  PCI_DMA_FROMDEVICE);
5506         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5507                 dev_kfree_skb(skb);
5508                 return -EIO;
5509         }
5510
5511         map->skb = skb;
5512         dma_unmap_addr_set(map, mapping, mapping);
5513
5514         desc->addr_hi = ((u64)mapping >> 32);
5515         desc->addr_lo = ((u64)mapping & 0xffffffff);
5516
5517         return skb_size;
5518 }
5519
5520 /* We only need to move over in the address because the other
5521  * members of the RX descriptor are invariant.  See notes above
5522  * tg3_alloc_rx_skb for full details.
5523  */
5524 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5525                            struct tg3_rx_prodring_set *dpr,
5526                            u32 opaque_key, int src_idx,
5527                            u32 dest_idx_unmasked)
5528 {
5529         struct tg3 *tp = tnapi->tp;
5530         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5531         struct ring_info *src_map, *dest_map;
5532         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5533         int dest_idx;
5534
5535         switch (opaque_key) {
5536         case RXD_OPAQUE_RING_STD:
5537                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5538                 dest_desc = &dpr->rx_std[dest_idx];
5539                 dest_map = &dpr->rx_std_buffers[dest_idx];
5540                 src_desc = &spr->rx_std[src_idx];
5541                 src_map = &spr->rx_std_buffers[src_idx];
5542                 break;
5543
5544         case RXD_OPAQUE_RING_JUMBO:
5545                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5546                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5547                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5548                 src_desc = &spr->rx_jmb[src_idx].std;
5549                 src_map = &spr->rx_jmb_buffers[src_idx];
5550                 break;
5551
5552         default:
5553                 return;
5554         }
5555
5556         dest_map->skb = src_map->skb;
5557         dma_unmap_addr_set(dest_map, mapping,
5558                            dma_unmap_addr(src_map, mapping));
5559         dest_desc->addr_hi = src_desc->addr_hi;
5560         dest_desc->addr_lo = src_desc->addr_lo;
5561
5562         /* Ensure that the update to the skb happens after the physical
5563          * addresses have been transferred to the new BD location.
5564          */
5565         smp_wmb();
5566
5567         src_map->skb = NULL;
5568 }
5569
5570 /* The RX ring scheme is composed of multiple rings which post fresh
5571  * buffers to the chip, and one special ring the chip uses to report
5572  * status back to the host.
5573  *
5574  * The special ring reports the status of received packets to the
5575  * host.  The chip does not write into the original descriptor the
5576  * RX buffer was obtained from.  The chip simply takes the original
5577  * descriptor as provided by the host, updates the status and length
5578  * field, then writes this into the next status ring entry.
5579  *
5580  * Each ring the host uses to post buffers to the chip is described
5581  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5582  * it is first placed into the on-chip ram.  When the packet's length
5583  * is known, it walks down the TG3_BDINFO entries to select the ring.
5584  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5585  * whose MAXLEN covers the new packet's length is chosen.
5586  *
5587  * The "separate ring for rx status" scheme may sound queer, but it makes
5588  * sense from a cache coherency perspective.  If only the host writes
5589  * to the buffer post rings, and only the chip writes to the rx status
5590  * rings, then cache lines never move beyond shared-modified state.
5591  * If both the host and chip were to write into the same ring, cache line
5592  * eviction could occur since both entities want it in an exclusive state.
5593  */
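
/* A concrete example (buffer sizes here are only illustrative): with
 * a standard ring posting ~1.5K buffers and a jumbo ring posting ~9K
 * buffers, a 4000-byte frame exceeds the standard TG3_BDINFO's MAXLEN
 * and is therefore placed using a descriptor from the jumbo ring.
 */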
5594 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5595 {
5596         struct tg3 *tp = tnapi->tp;
5597         u32 work_mask, rx_std_posted = 0;
5598         u32 std_prod_idx, jmb_prod_idx;
5599         u32 sw_idx = tnapi->rx_rcb_ptr;
5600         u16 hw_idx;
5601         int received;
5602         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5603
5604         hw_idx = *(tnapi->rx_rcb_prod_idx);
5605         /*
5606          * We need to order the read of hw_idx and the read of
5607          * the opaque cookie.
5608          */
5609         rmb();
5610         work_mask = 0;
5611         received = 0;
5612         std_prod_idx = tpr->rx_std_prod_idx;
5613         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5614         while (sw_idx != hw_idx && budget > 0) {
5615                 struct ring_info *ri;
5616                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5617                 unsigned int len;
5618                 struct sk_buff *skb;
5619                 dma_addr_t dma_addr;
5620                 u32 opaque_key, desc_idx, *post_ptr;
5621
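                     /* The opaque cookie, set by the driver when the buffer
                      * was posted, echoes back the source ring type and the
                      * index within that ring.
                      */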
5622                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5623                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5624                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5625                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5626                         dma_addr = dma_unmap_addr(ri, mapping);
5627                         skb = ri->skb;
5628                         post_ptr = &std_prod_idx;
5629                         rx_std_posted++;
5630                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5631                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5632                         dma_addr = dma_unmap_addr(ri, mapping);
5633                         skb = ri->skb;
5634                         post_ptr = &jmb_prod_idx;
5635                 } else
5636                         goto next_pkt_nopost;
5637
5638                 work_mask |= opaque_key;
5639
5640                 if (desc->err_vlan & RXD_ERR_MASK) {
5641                 drop_it:
5642                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5643                                        desc_idx, *post_ptr);
5644                 drop_it_no_recycle:
5645                         /* Other statistics are tracked by the card. */
5646                         tp->rx_dropped++;
5647                         goto next_pkt;
5648                 }
5649
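                     /* The chip reports the frame length including the
                      * 4-byte FCS; strip it here.
                      */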
5650                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5651                       ETH_FCS_LEN;
5652
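                     /* Copybreak: large packets keep their mapped buffer
                      * and a fresh one is posted to the ring; small packets
                      * are copied into a new skb so the original buffer can
                      * be recycled in place.
                      */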
5653                 if (len > TG3_RX_COPY_THRESH(tp)) {
5654                         int skb_size;
5655
5656                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5657                                                     *post_ptr);
5658                         if (skb_size < 0)
5659                                 goto drop_it;
5660
5661                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5662                                          PCI_DMA_FROMDEVICE);
5663
5664                         /* Ensure that the update to the skb happens
5665                          * after the usage of the old DMA mapping.
5666                          */
5667                         smp_wmb();
5668
5669                         ri->skb = NULL;
5670
5671                         skb_put(skb, len);
5672                 } else {
5673                         struct sk_buff *copy_skb;
5674
5675                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5676                                        desc_idx, *post_ptr);
5677
5678                         copy_skb = netdev_alloc_skb(tp->dev, len +
5679                                                     TG3_RAW_IP_ALIGN);
5680                         if (copy_skb == NULL)
5681                                 goto drop_it_no_recycle;
5682
5683                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5684                         skb_put(copy_skb, len);
5685                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
                                                         len, PCI_DMA_FROMDEVICE);
5686                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5687                         pci_dma_sync_single_for_device(tp->pdev, dma_addr,
                                                            len, PCI_DMA_FROMDEVICE);
5688
5689                         /* We'll reuse the original ring buffer. */
5690                         skb = copy_skb;
5691                 }
5692
5693                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5694                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5695                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5696                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5697                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5698                 else
5699                         skb_checksum_none_assert(skb);
5700
5701                 skb->protocol = eth_type_trans(skb, tp->dev);
5702
5703                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5704                     skb->protocol != htons(ETH_P_8021Q)) {
5705                         dev_kfree_skb(skb);
5706                         goto drop_it_no_recycle;
5707                 }
5708
5709                 if (desc->type_flags & RXD_FLAG_VLAN &&
5710                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5711                         __vlan_hwaccel_put_tag(skb,
5712                                                desc->err_vlan & RXD_VLAN_MASK);
5713
5714                 napi_gro_receive(&tnapi->napi, skb);
5715
5716                 received++;
5717                 budget--;
5718
5719 next_pkt:
5720                 (*post_ptr)++;
5721
5722                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5723                         tpr->rx_std_prod_idx = std_prod_idx &
5724                                                tp->rx_std_ring_mask;
5725                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5726                                      tpr->rx_std_prod_idx);
5727                         work_mask &= ~RXD_OPAQUE_RING_STD;
5728                         rx_std_posted = 0;
5729                 }
5730 next_pkt_nopost:
5731                 sw_idx++;
5732                 sw_idx &= tp->rx_ret_ring_mask;
5733
5734                 /* Refresh hw_idx to see if there is new work */
5735                 if (sw_idx == hw_idx) {
5736                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5737                         rmb();
5738                 }
5739         }
5740
5741         /* ACK the status ring. */
5742         tnapi->rx_rcb_ptr = sw_idx;
5743         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5744
5745         /* Refill RX ring(s). */
5746         if (!tg3_flag(tp, ENABLE_RSS)) {
5747                 if (work_mask & RXD_OPAQUE_RING_STD) {
5748                         tpr->rx_std_prod_idx = std_prod_idx &
5749                                                tp->rx_std_ring_mask;
5750                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5751                                      tpr->rx_std_prod_idx);
5752                 }
5753                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5754                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5755                                                tp->rx_jmb_ring_mask;
5756                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5757                                      tpr->rx_jmb_prod_idx);
5758                 }
5759                 mmiowb();
5760         } else if (work_mask) {
5761                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5762                  * updated before the producer indices can be updated.
5763                  */
5764                 smp_wmb();
5765
5766                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5767                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5768
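                     /* Vector 1 is the one that drains everyone's recycled
                      * buffers back to the hardware rings (see
                      * tg3_poll_work()); kick it if we are another vector.
                      */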
5769                 if (tnapi != &tp->napi[1])
5770                         napi_schedule(&tp->napi[1].napi);
5771         }
5772
5773         return received;
5774 }
5775
5776 static void tg3_poll_link(struct tg3 *tp)
5777 {
5778         /* handle link change and other phy events */
5779         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5780                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5781
5782                 if (sblk->status & SD_STATUS_LINK_CHG) {
5783                         sblk->status = SD_STATUS_UPDATED |
5784                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5785                         spin_lock(&tp->lock);
5786                         if (tg3_flag(tp, USE_PHYLIB)) {
5787                                 tw32_f(MAC_STATUS,
5788                                      (MAC_STATUS_SYNC_CHANGED |
5789                                       MAC_STATUS_CFG_CHANGED |
5790                                       MAC_STATUS_MI_COMPLETION |
5791                                       MAC_STATUS_LNKSTATE_CHANGED));
5792                                 udelay(40);
5793                         } else
5794                                 tg3_setup_phy(tp, 0);
5795                         spin_unlock(&tp->lock);
5796                 }
5797         }
5798 }
5799
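     /* Move recycled RX buffers from a per-vector source producer ring
      * (spr) back to the destination ring (dpr), copying both the
      * ring_info entries and the buffer descriptor addresses.  Returns
      * -ENOSPC if occupied slots in the destination ring cut the copy
      * short.
      */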
5800 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5801                                 struct tg3_rx_prodring_set *dpr,
5802                                 struct tg3_rx_prodring_set *spr)
5803 {
5804         u32 si, di, cpycnt, src_prod_idx;
5805         int i, err = 0;
5806
5807         while (1) {
5808                 src_prod_idx = spr->rx_std_prod_idx;
5809
5810                 /* Make sure updates to the rx_std_buffers[] entries and the
5811                  * standard producer index are seen in the correct order.
5812                  */
5813                 smp_rmb();
5814
5815                 if (spr->rx_std_cons_idx == src_prod_idx)
5816                         break;
5817
5818                 if (spr->rx_std_cons_idx < src_prod_idx)
5819                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5820                 else
5821                         cpycnt = tp->rx_std_ring_mask + 1 -
5822                                  spr->rx_std_cons_idx;
5823
5824                 cpycnt = min(cpycnt,
5825                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5826
5827                 si = spr->rx_std_cons_idx;
5828                 di = dpr->rx_std_prod_idx;
5829
5830                 for (i = di; i < di + cpycnt; i++) {
5831                         if (dpr->rx_std_buffers[i].skb) {
5832                                 cpycnt = i - di;
5833                                 err = -ENOSPC;
5834                                 break;
5835                         }
5836                 }
5837
5838                 if (!cpycnt)
5839                         break;
5840
5841                 /* Ensure that updates to the rx_std_buffers ring and the
5842                  * shadowed hardware producer ring from tg3_recycle_skb() are
5843                  * ordered correctly WRT the skb check above.
5844                  */
5845                 smp_rmb();
5846
5847                 memcpy(&dpr->rx_std_buffers[di],
5848                        &spr->rx_std_buffers[si],
5849                        cpycnt * sizeof(struct ring_info));
5850
5851                 for (i = 0; i < cpycnt; i++, di++, si++) {
5852                         struct tg3_rx_buffer_desc *sbd, *dbd;
5853                         sbd = &spr->rx_std[si];
5854                         dbd = &dpr->rx_std[di];
5855                         dbd->addr_hi = sbd->addr_hi;
5856                         dbd->addr_lo = sbd->addr_lo;
5857                 }
5858
5859                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5860                                        tp->rx_std_ring_mask;
5861                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5862                                        tp->rx_std_ring_mask;
5863         }
5864
5865         while (1) {
5866                 src_prod_idx = spr->rx_jmb_prod_idx;
5867
5868                 /* Make sure updates to the rx_jmb_buffers[] entries and
5869                  * the jumbo producer index are seen in the correct order.
5870                  */
5871                 smp_rmb();
5872
5873                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5874                         break;
5875
5876                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5877                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5878                 else
5879                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5880                                  spr->rx_jmb_cons_idx;
5881
5882                 cpycnt = min(cpycnt,
5883                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5884
5885                 si = spr->rx_jmb_cons_idx;
5886                 di = dpr->rx_jmb_prod_idx;
5887
5888                 for (i = di; i < di + cpycnt; i++) {
5889                         if (dpr->rx_jmb_buffers[i].skb) {
5890                                 cpycnt = i - di;
5891                                 err = -ENOSPC;
5892                                 break;
5893                         }
5894                 }
5895
5896                 if (!cpycnt)
5897                         break;
5898
5899                 /* Ensure that updates to the rx_jmb_buffers ring and the
5900                  * shadowed hardware producer ring from tg3_recycle_skb() are
5901                  * ordered correctly WRT the skb check above.
5902                  */
5903                 smp_rmb();
5904
5905                 memcpy(&dpr->rx_jmb_buffers[di],
5906                        &spr->rx_jmb_buffers[si],
5907                        cpycnt * sizeof(struct ring_info));
5908
5909                 for (i = 0; i < cpycnt; i++, di++, si++) {
5910                         struct tg3_rx_buffer_desc *sbd, *dbd;
5911                         sbd = &spr->rx_jmb[si].std;
5912                         dbd = &dpr->rx_jmb[di].std;
5913                         dbd->addr_hi = sbd->addr_hi;
5914                         dbd->addr_lo = sbd->addr_lo;
5915                 }
5916
5917                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5918                                        tp->rx_jmb_ring_mask;
5919                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5920                                        tp->rx_jmb_ring_mask;
5921         }
5922
5923         return err;
5924 }
5925
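     /* One NAPI work pass: reap TX completions first, then receive up to
      * the remaining budget.  Under RSS, vector 1 additionally drains
      * every vector's producer ring back to napi[0]'s ring and updates
      * the hardware producer mailboxes.
      */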
5926 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5927 {
5928         struct tg3 *tp = tnapi->tp;
5929
5930         /* run TX completion thread */
5931         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5932                 tg3_tx(tnapi);
5933                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5934                         return work_done;
5935         }
5936
5937         if (!tnapi->rx_rcb_prod_idx)
5938                 return work_done;
5939
5940         /* run RX thread, within the bounds set by NAPI.
5941          * All RX "locking" is done by ensuring outside
5942          * code synchronizes with tg3->napi.poll()
5943          */
5944         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5945                 work_done += tg3_rx(tnapi, budget - work_done);
5946
5947         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5948                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5949                 int i, err = 0;
5950                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5951                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5952
5953                 for (i = 1; i < tp->irq_cnt; i++)
5954                         err |= tg3_rx_prodring_xfer(tp, dpr,
5955                                                     &tp->napi[i].prodring);
5956
5957                 wmb();
5958
5959                 if (std_prod_idx != dpr->rx_std_prod_idx)
5960                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5961                                      dpr->rx_std_prod_idx);
5962
5963                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5964                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5965                                      dpr->rx_jmb_prod_idx);
5966
5967                 mmiowb();
5968
5969                 if (err)
5970                         tw32_f(HOSTCC_MODE, tp->coal_now);
5971         }
5972
5973         return work_done;
5974 }
5975
5976 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5977 {
5978         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5979                 schedule_work(&tp->reset_task);
5980 }
5981
5982 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5983 {
5984         cancel_work_sync(&tp->reset_task);
5985         tg3_flag_clear(tp, RESET_TASK_PENDING);
5986 }
5987
5988 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5989 {
5990         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5991         struct tg3 *tp = tnapi->tp;
5992         int work_done = 0;
5993         struct tg3_hw_status *sblk = tnapi->hw_status;
5994
5995         while (1) {
5996                 work_done = tg3_poll_work(tnapi, work_done, budget);
5997
5998                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5999                         goto tx_recovery;
6000
6001                 if (unlikely(work_done >= budget))
6002                         break;
6003
6004                 /* tnapi->last_tag is written to the int mailbox below
6005                  * to tell the hw how much work has been processed,
6006                  * so we must read it before checking for more work.
6007                  */
6008                 tnapi->last_tag = sblk->status_tag;
6009                 tnapi->last_irq_tag = tnapi->last_tag;
6010                 rmb();
6011
6012                 /* check for RX/TX work to do */
6013                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6014                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6015                         napi_complete(napi);
6016                         /* Reenable interrupts. */
6017                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6018                         mmiowb();
6019                         break;
6020                 }
6021         }
6022
6023         return work_done;
6024
6025 tx_recovery:
6026         /* work_done is guaranteed to be less than budget. */
6027         napi_complete(napi);
6028         tg3_reset_task_schedule(tp);
6029         return work_done;
6030 }
6031
6032 static void tg3_process_error(struct tg3 *tp)
6033 {
6034         u32 val;
6035         bool real_error = false;
6036
6037         if (tg3_flag(tp, ERROR_PROCESSED))
6038                 return;
6039
6040         /* Check Flow Attention register */
6041         val = tr32(HOSTCC_FLOW_ATTN);
6042         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6043                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6044                 real_error = true;
6045         }
6046
6047         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6048                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6049                 real_error = true;
6050         }
6051
6052         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6053                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6054                 real_error = true;
6055         }
6056
6057         if (!real_error)
6058                 return;
6059
6060         tg3_dump_state(tp);
6061
6062         tg3_flag_set(tp, ERROR_PROCESSED);
6063         tg3_reset_task_schedule(tp);
6064 }
6065
6066 static int tg3_poll(struct napi_struct *napi, int budget)
6067 {
6068         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6069         struct tg3 *tp = tnapi->tp;
6070         int work_done = 0;
6071         struct tg3_hw_status *sblk = tnapi->hw_status;
6072
6073         while (1) {
6074                 if (sblk->status & SD_STATUS_ERROR)
6075                         tg3_process_error(tp);
6076
6077                 tg3_poll_link(tp);
6078
6079                 work_done = tg3_poll_work(tnapi, work_done, budget);
6080
6081                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6082                         goto tx_recovery;
6083
6084                 if (unlikely(work_done >= budget))
6085                         break;
6086
6087                 if (tg3_flag(tp, TAGGED_STATUS)) {
6088                         /* tnapi->last_tag is used in tg3_int_reenable() below
6089                          * to tell the hw how much work has been processed,
6090                          * so we must read it before checking for more work.
6091                          */
6092                         tnapi->last_tag = sblk->status_tag;
6093                         tnapi->last_irq_tag = tnapi->last_tag;
6094                         rmb();
6095                 } else
6096                         sblk->status &= ~SD_STATUS_UPDATED;
6097
6098                 if (likely(!tg3_has_work(tnapi))) {
6099                         napi_complete(napi);
6100                         tg3_int_reenable(tnapi);
6101                         break;
6102                 }
6103         }
6104
6105         return work_done;
6106
6107 tx_recovery:
6108         /* work_done is guaranteed to be less than budget. */
6109         napi_complete(napi);
6110         tg3_reset_task_schedule(tp);
6111         return work_done;
6112 }
6113
6114 static void tg3_napi_disable(struct tg3 *tp)
6115 {
6116         int i;
6117
6118         for (i = tp->irq_cnt - 1; i >= 0; i--)
6119                 napi_disable(&tp->napi[i].napi);
6120 }
6121
6122 static void tg3_napi_enable(struct tg3 *tp)
6123 {
6124         int i;
6125
6126         for (i = 0; i < tp->irq_cnt; i++)
6127                 napi_enable(&tp->napi[i].napi);
6128 }
6129
6130 static void tg3_napi_init(struct tg3 *tp)
6131 {
6132         int i;
6133
6134         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6135         for (i = 1; i < tp->irq_cnt; i++)
6136                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6137 }
6138
6139 static void tg3_napi_fini(struct tg3 *tp)
6140 {
6141         int i;
6142
6143         for (i = 0; i < tp->irq_cnt; i++)
6144                 netif_napi_del(&tp->napi[i].napi);
6145 }
6146
6147 static inline void tg3_netif_stop(struct tg3 *tp)
6148 {
6149         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6150         tg3_napi_disable(tp);
6151         netif_tx_disable(tp->dev);
6152 }
6153
6154 static inline void tg3_netif_start(struct tg3 *tp)
6155 {
6156         /* NOTE: unconditional netif_tx_wake_all_queues is only
6157          * appropriate so long as all callers are assured to
6158          * have free tx slots (such as after tg3_init_hw)
6159          */
6160         netif_tx_wake_all_queues(tp->dev);
6161
6162         tg3_napi_enable(tp);
6163         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6164         tg3_enable_ints(tp);
6165 }
6166
6167 static void tg3_irq_quiesce(struct tg3 *tp)
6168 {
6169         int i;
6170
6171         BUG_ON(tp->irq_sync);
6172
6173         tp->irq_sync = 1;
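             /* Publish irq_sync before waiting so that interrupt handlers
              * entered from here on observe it and bail out early.
              */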
6174         smp_mb();
6175
6176         for (i = 0; i < tp->irq_cnt; i++)
6177                 synchronize_irq(tp->napi[i].irq_vec);
6178 }
6179
6180 /* Fully shut down all tg3 driver activity elsewhere in the system.
6181  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
6182  * Most of the time this is not necessary, except when shutting down
6183  * the device.
6184  */
6185 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6186 {
6187         spin_lock_bh(&tp->lock);
6188         if (irq_sync)
6189                 tg3_irq_quiesce(tp);
6190 }
6191
6192 static inline void tg3_full_unlock(struct tg3 *tp)
6193 {
6194         spin_unlock_bh(&tp->lock);
6195 }
6196
6197 /* One-shot MSI handler - the chip automatically disables the interrupt
6198  * after sending the MSI, so the driver doesn't have to do it.
6199  */
6200 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6201 {
6202         struct tg3_napi *tnapi = dev_id;
6203         struct tg3 *tp = tnapi->tp;
6204
6205         prefetch(tnapi->hw_status);
6206         if (tnapi->rx_rcb)
6207                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6208
6209         if (likely(!tg3_irq_sync(tp)))
6210                 napi_schedule(&tnapi->napi);
6211
6212         return IRQ_HANDLED;
6213 }
6214
6215 /* MSI ISR - No need to check for interrupt sharing and no need to
6216  * flush status block and interrupt mailbox. PCI ordering rules
6217  * guarantee that MSI will arrive after the status block.
6218  */
6219 static irqreturn_t tg3_msi(int irq, void *dev_id)
6220 {
6221         struct tg3_napi *tnapi = dev_id;
6222         struct tg3 *tp = tnapi->tp;
6223
6224         prefetch(tnapi->hw_status);
6225         if (tnapi->rx_rcb)
6226                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6227         /*
6228          * Writing any value to intr-mbox-0 clears PCI INTA# and
6229          * chip-internal interrupt pending events.
6230          * Writing non-zero to intr-mbox-0 additionally tells the
6231          * NIC to stop sending us irqs, engaging "in-intr-handler"
6232          * event coalescing.
6233          */
6234         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6235         if (likely(!tg3_irq_sync(tp)))
6236                 napi_schedule(&tnapi->napi);
6237
6238         return IRQ_RETVAL(1);
6239 }
6240
6241 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6242 {
6243         struct tg3_napi *tnapi = dev_id;
6244         struct tg3 *tp = tnapi->tp;
6245         struct tg3_hw_status *sblk = tnapi->hw_status;
6246         unsigned int handled = 1;
6247
6248         /* In INTx mode, it is possible for the interrupt to arrive at
6249          * the CPU before the status block posted prior to it is visible.
6250          * Reading the PCI State register will confirm whether the
6251          * interrupt is ours and will flush the status block.
6252          */
6253         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6254                 if (tg3_flag(tp, CHIP_RESETTING) ||
6255                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6256                         handled = 0;
6257                         goto out;
6258                 }
6259         }
6260
6261         /*
6262          * Writing any value to intr-mbox-0 clears PCI INTA# and
6263          * chip-internal interrupt pending events.
6264          * Writing non-zero to intr-mbox-0 additionally tells the
6265          * NIC to stop sending us irqs, engaging "in-intr-handler"
6266          * event coalescing.
6267          *
6268          * Flush the mailbox to de-assert the IRQ immediately to prevent
6269          * spurious interrupts.  The flush impacts performance but
6270          * excessive spurious interrupts can be worse in some cases.
6271          */
6272         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6273         if (tg3_irq_sync(tp))
6274                 goto out;
6275         sblk->status &= ~SD_STATUS_UPDATED;
6276         if (likely(tg3_has_work(tnapi))) {
6277                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6278                 napi_schedule(&tnapi->napi);
6279         } else {
6280                 /* No work, shared interrupt perhaps?  Re-enable
6281                  * interrupts, and flush that PCI write.
6282                  */
6283                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6284                                0x00000000);
6285         }
6286 out:
6287         return IRQ_RETVAL(handled);
6288 }
6289
6290 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6291 {
6292         struct tg3_napi *tnapi = dev_id;
6293         struct tg3 *tp = tnapi->tp;
6294         struct tg3_hw_status *sblk = tnapi->hw_status;
6295         unsigned int handled = 1;
6296
6297         /* In INTx mode, it is possible for the interrupt to arrive at
6298          * the CPU before the status block posted prior to it is visible.
6299          * Reading the PCI State register will confirm whether the
6300          * interrupt is ours and will flush the status block.
6301          */
6302         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6303                 if (tg3_flag(tp, CHIP_RESETTING) ||
6304                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6305                         handled = 0;
6306                         goto out;
6307                 }
6308         }
6309
6310         /*
6311          * Writing any value to intr-mbox-0 clears PCI INTA# and
6312          * chip-internal interrupt pending events.
6313          * Writing non-zero to intr-mbox-0 additionally tells the
6314          * NIC to stop sending us irqs, engaging "in-intr-handler"
6315          * event coalescing.
6316          *
6317          * Flush the mailbox to de-assert the IRQ immediately to prevent
6318          * spurious interrupts.  The flush impacts performance but
6319          * excessive spurious interrupts can be worse in some cases.
6320          */
6321         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6322
6323         /*
6324          * In a shared interrupt configuration, sometimes other devices'
6325          * interrupts will scream.  We record the current status tag here
6326          * so that the above check can report that the screaming interrupts
6327          * are unhandled.  Eventually they will be silenced.
6328          */
6329         tnapi->last_irq_tag = sblk->status_tag;
6330
6331         if (tg3_irq_sync(tp))
6332                 goto out;
6333
6334         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6335
6336         napi_schedule(&tnapi->napi);
6337
6338 out:
6339         return IRQ_RETVAL(handled);
6340 }
6341
6342 /* ISR for interrupt test */
6343 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6344 {
6345         struct tg3_napi *tnapi = dev_id;
6346         struct tg3 *tp = tnapi->tp;
6347         struct tg3_hw_status *sblk = tnapi->hw_status;
6348
6349         if ((sblk->status & SD_STATUS_UPDATED) ||
6350             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6351                 tg3_disable_ints(tp);
6352                 return IRQ_RETVAL(1);
6353         }
6354         return IRQ_RETVAL(0);
6355 }
6356
6357 static int tg3_init_hw(struct tg3 *, int);
6358 static int tg3_halt(struct tg3 *, int, int);
6359
6360 /* Restart hardware after configuration changes, self-test, etc.
6361  * Invoked with tp->lock held.
6362  */
6363 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6364         __releases(tp->lock)
6365         __acquires(tp->lock)
6366 {
6367         int err;
6368
6369         err = tg3_init_hw(tp, reset_phy);
6370         if (err) {
6371                 netdev_err(tp->dev,
6372                            "Failed to re-initialize device, aborting\n");
6373                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6374                 tg3_full_unlock(tp);
6375                 del_timer_sync(&tp->timer);
6376                 tp->irq_sync = 0;
6377                 tg3_napi_enable(tp);
6378                 dev_close(tp->dev);
6379                 tg3_full_lock(tp, 0);
6380         }
6381         return err;
6382 }
6383
6384 #ifdef CONFIG_NET_POLL_CONTROLLER
6385 static void tg3_poll_controller(struct net_device *dev)
6386 {
6387         int i;
6388         struct tg3 *tp = netdev_priv(dev);
6389
6390         if (tg3_irq_sync(tp))
6391                 return;
6392
6393         for (i = 0; i < tp->irq_cnt; i++)
6394                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6395 }
6396 #endif
6397
6398 static void tg3_reset_task(struct work_struct *work)
6399 {
6400         struct tg3 *tp = container_of(work, struct tg3, reset_task);
6401         int err;
6402
6403         tg3_full_lock(tp, 0);
6404
6405         if (!netif_running(tp->dev)) {
6406                 tg3_flag_clear(tp, RESET_TASK_PENDING);
6407                 tg3_full_unlock(tp);
6408                 return;
6409         }
6410
6411         tg3_full_unlock(tp);
6412
6413         tg3_phy_stop(tp);
6414
6415         tg3_netif_stop(tp);
6416
6417         tg3_full_lock(tp, 1);
6418
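             /* A TX recovery assumes the hang came from posted mailbox
              * write reordering, so switch the mailbox write routines to
              * flushing variants before the chip is reinitialized.
              */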
6419         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6420                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6421                 tp->write32_rx_mbox = tg3_write_flush_reg32;
6422                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6423                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6424         }
6425
6426         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6427         err = tg3_init_hw(tp, 1);
6428         if (err)
6429                 goto out;
6430
6431         tg3_netif_start(tp);
6432
6433 out:
6434         tg3_full_unlock(tp);
6435
6436         if (!err)
6437                 tg3_phy_start(tp);
6438
6439         tg3_flag_clear(tp, RESET_TASK_PENDING);
6440 }
6441
6442 static void tg3_tx_timeout(struct net_device *dev)
6443 {
6444         struct tg3 *tp = netdev_priv(dev);
6445
6446         if (netif_msg_tx_err(tp)) {
6447                 netdev_err(dev, "transmit timed out, resetting\n");
6448                 tg3_dump_state(tp);
6449         }
6450
6451         tg3_reset_task_schedule(tp);
6452 }
6453
6454 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
6455 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6456 {
6457         u32 base = (u32) mapping & 0xffffffff;
6458
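             /* base + len + 8 wrapping the 32-bit base means the buffer
              * would cross a 4GB boundary; the first clause is a cheap
              * filter that skips the wrap test for bases far below the
              * boundary.
              */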
6459         return (base > 0xffffdcc0) && (base + len + 8 < base);
6460 }
6461
6462 /* Test for DMA addresses > 40-bit */
6463 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6464                                           int len)
6465 {
6466 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6467         if (tg3_flag(tp, 40BIT_DMA_BUG))
6468                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6469         return 0;
6470 #else
6471         return 0;
6472 #endif
6473 }
6474
6475 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6476                                  dma_addr_t mapping, u32 len, u32 flags,
6477                                  u32 mss, u32 vlan)
6478 {
6479         txbd->addr_hi = ((u64) mapping >> 32);
6480         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6481         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6482         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6483 }
6484
6485 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6486                             dma_addr_t map, u32 len, u32 flags,
6487                             u32 mss, u32 vlan)
6488 {
6489         struct tg3 *tp = tnapi->tp;
6490         bool hwbug = false;
6491
6492         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6493                 hwbug = true;
6494
6495         if (tg3_4g_overflow_test(map, len))
6496                 hwbug = true;
6497
6498         if (tg3_40bit_overflow_test(tp, map, len))
6499                 hwbug = true;
6500
6501         if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6502                 u32 prvidx = *entry;
6503                 u32 tmp_flag = flags & ~TXD_FLAG_END;
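                     /* Chop BDs longer than TG3_TX_BD_DMA_MAX into pieces
                      * so no single BD exceeds the chip's DMA FIFO limit;
                      * only the last piece keeps the caller's END flag.
                      */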
6504                 while (len > TG3_TX_BD_DMA_MAX && *budget) {
6505                         u32 frag_len = TG3_TX_BD_DMA_MAX;
6506                         len -= TG3_TX_BD_DMA_MAX;
6507
6508                         /* Avoid the 8-byte DMA problem */
6509                         if (len <= 8) {
6510                                 len += TG3_TX_BD_DMA_MAX / 2;
6511                                 frag_len = TG3_TX_BD_DMA_MAX / 2;
6512                         }
6513
6514                         tnapi->tx_buffers[*entry].fragmented = true;
6515
6516                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6517                                       frag_len, tmp_flag, mss, vlan);
6518                         *budget -= 1;
6519                         prvidx = *entry;
6520                         *entry = NEXT_TX(*entry);
6521
6522                         map += frag_len;
6523                 }
6524
6525                 if (len) {
6526                         if (*budget) {
6527                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6528                                               len, flags, mss, vlan);
6529                                 *budget -= 1;
6530                                 *entry = NEXT_TX(*entry);
6531                         } else {
6532                                 hwbug = true;
6533                                 tnapi->tx_buffers[prvidx].fragmented = false;
6534                         }
6535                 }
6536         } else {
6537                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6538                               len, flags, mss, vlan);
6539                 *entry = NEXT_TX(*entry);
6540         }
6541
6542         return hwbug;
6543 }
6544
6545 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6546 {
6547         int i;
6548         struct sk_buff *skb;
6549         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6550
6551         skb = txb->skb;
6552         txb->skb = NULL;
6553
6554         pci_unmap_single(tnapi->tp->pdev,
6555                          dma_unmap_addr(txb, mapping),
6556                          skb_headlen(skb),
6557                          PCI_DMA_TODEVICE);
6558
6559         while (txb->fragmented) {
6560                 txb->fragmented = false;
6561                 entry = NEXT_TX(entry);
6562                 txb = &tnapi->tx_buffers[entry];
6563         }
6564
6565         for (i = 0; i <= last; i++) {
6566                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6567
6568                 entry = NEXT_TX(entry);
6569                 txb = &tnapi->tx_buffers[entry];
6570
6571                 pci_unmap_page(tnapi->tp->pdev,
6572                                dma_unmap_addr(txb, mapping),
6573                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6574
6575                 while (txb->fragmented) {
6576                         txb->fragmented = false;
6577                         entry = NEXT_TX(entry);
6578                         txb = &tnapi->tx_buffers[entry];
6579                 }
6580         }
6581 }
6582
6583 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6584 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6585                                        struct sk_buff **pskb,
6586                                        u32 *entry, u32 *budget,
6587                                        u32 base_flags, u32 mss, u32 vlan)
6588 {
6589         struct tg3 *tp = tnapi->tp;
6590         struct sk_buff *new_skb, *skb = *pskb;
6591         dma_addr_t new_addr = 0;
6592         int ret = 0;
6593
6594         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6595                 new_skb = skb_copy(skb, GFP_ATOMIC);
6596         else {
6597                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6598
6599                 new_skb = skb_copy_expand(skb,
6600                                           skb_headroom(skb) + more_headroom,
6601                                           skb_tailroom(skb), GFP_ATOMIC);
6602         }
6603
6604         if (!new_skb) {
6605                 ret = -1;
6606         } else {
6607                 /* New SKB is guaranteed to be linear. */
6608                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6609                                           PCI_DMA_TODEVICE);
6610                 /* Make sure the mapping succeeded */
6611                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6612                         dev_kfree_skb(new_skb);
6613                         ret = -1;
6614                 } else {
6615                         u32 save_entry = *entry;
6616
6617                         base_flags |= TXD_FLAG_END;
6618
6619                         tnapi->tx_buffers[*entry].skb = new_skb;
6620                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6621                                            mapping, new_addr);
6622
6623                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6624                                             new_skb->len, base_flags,
6625                                             mss, vlan)) {
6626                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6627                                 dev_kfree_skb(new_skb);
6628                                 ret = -1;
6629                         }
6630                 }
6631         }
6632
6633         dev_kfree_skb(skb);
6634         *pskb = new_skb;
6635         return ret;
6636 }
6637
6638 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6639
6640 /* Use GSO to work around a rare TSO bug that may be triggered when the
6641  * TSO header is greater than 80 bytes.
6642  */
6643 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6644 {
6645         struct sk_buff *segs, *nskb;
6646         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6647
6648         /* Estimate the number of fragments in the worst case */
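             /* (the factor of 3 presumably allows up to three BDs per
              * resulting segment: header plus a couple of data fragments)
              */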
6649         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6650                 netif_stop_queue(tp->dev);
6651
6652                 /* netif_tx_stop_queue() must be done before checking
6653                  * tx index in tg3_tx_avail() below, because in
6654                  * tg3_tx(), we update tx index before checking for
6655                  * netif_tx_queue_stopped().
6656                  */
6657                 smp_mb();
6658                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6659                         return NETDEV_TX_BUSY;
6660
6661                 netif_wake_queue(tp->dev);
6662         }
6663
6664         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6665         if (IS_ERR(segs))
6666                 goto tg3_tso_bug_end;
6667
6668         do {
6669                 nskb = segs;
6670                 segs = segs->next;
6671                 nskb->next = NULL;
6672                 tg3_start_xmit(nskb, tp->dev);
6673         } while (segs);
6674
6675 tg3_tso_bug_end:
6676         dev_kfree_skb(skb);
6677
6678         return NETDEV_TX_OK;
6679 }
6680
6681 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6682  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6683  */
6684 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6685 {
6686         struct tg3 *tp = netdev_priv(dev);
6687         u32 len, entry, base_flags, mss, vlan = 0;
6688         u32 budget;
6689         int i = -1, would_hit_hwbug;
6690         dma_addr_t mapping;
6691         struct tg3_napi *tnapi;
6692         struct netdev_queue *txq;
6693         unsigned int last;
6694
6695         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6696         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
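             /* With TSS, napi[0] carries no TX ring, so step past it to
              * the vector that owns this queue's TX ring.
              */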
6697         if (tg3_flag(tp, ENABLE_TSS))
6698                 tnapi++;
6699
6700         budget = tg3_tx_avail(tnapi);
6701
6702         /* We are running in BH disabled context with netif_tx_lock
6703          * and TX reclaim runs via tp->napi.poll inside of a software
6704          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6705          * no IRQ context deadlocks to worry about either.  Rejoice!
6706          */
6707         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6708                 if (!netif_tx_queue_stopped(txq)) {
6709                         netif_tx_stop_queue(txq);
6710
6711                         /* This is a hard error, log it. */
6712                         netdev_err(dev,
6713                                    "BUG! Tx Ring full when queue awake!\n");
6714                 }
6715                 return NETDEV_TX_BUSY;
6716         }
6717
6718         entry = tnapi->tx_prod;
6719         base_flags = 0;
6720         if (skb->ip_summed == CHECKSUM_PARTIAL)
6721                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6722
6723         mss = skb_shinfo(skb)->gso_size;
6724         if (mss) {
6725                 struct iphdr *iph;
6726                 u32 tcp_opt_len, hdr_len;
6727
6728                 if (skb_header_cloned(skb) &&
6729                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6730                         goto drop;
6731
6732                 iph = ip_hdr(skb);
6733                 tcp_opt_len = tcp_optlen(skb);
6734
6735                 if (skb_is_gso_v6(skb)) {
6736                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6737                 } else {
6738                         u32 ip_tcp_len;
6739
6740                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6741                         hdr_len = ip_tcp_len + tcp_opt_len;
6742
6743                         iph->check = 0;
6744                         iph->tot_len = htons(mss + hdr_len);
6745                 }
6746
6747                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6748                     tg3_flag(tp, TSO_BUG))
6749                         return tg3_tso_bug(tp, skb);
6750
6751                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6752                                TXD_FLAG_CPU_POST_DMA);
6753
6754                 if (tg3_flag(tp, HW_TSO_1) ||
6755                     tg3_flag(tp, HW_TSO_2) ||
6756                     tg3_flag(tp, HW_TSO_3)) {
6757                         tcp_hdr(skb)->check = 0;
6758                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6759                 } else
6760                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6761                                                                  iph->daddr, 0,
6762                                                                  IPPROTO_TCP,
6763                                                                  0);
6764
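                 /* The hardware TSO variants also want the header length
                  * encoded into the BD; the bit layout differs across the
                  * HW_TSO_1/2/3 generations handled below.
                  */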
6765                 if (tg3_flag(tp, HW_TSO_3)) {
6766                         mss |= (hdr_len & 0xc) << 12;
6767                         if (hdr_len & 0x10)
6768                                 base_flags |= 0x00000010;
6769                         base_flags |= (hdr_len & 0x3e0) << 5;
6770                 } else if (tg3_flag(tp, HW_TSO_2))
6771                         mss |= hdr_len << 9;
6772                 else if (tg3_flag(tp, HW_TSO_1) ||
6773                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6774                         if (tcp_opt_len || iph->ihl > 5) {
6775                                 int tsflags;
6776
6777                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6778                                 mss |= (tsflags << 11);
6779                         }
6780                 } else {
6781                         if (tcp_opt_len || iph->ihl > 5) {
6782                                 int tsflags;
6783
6784                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6785                                 base_flags |= tsflags << 12;
6786                         }
6787                 }
6788         }
6789
6790         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6791             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6792                 base_flags |= TXD_FLAG_JMB_PKT;
6793
6794         if (vlan_tx_tag_present(skb)) {
6795                 base_flags |= TXD_FLAG_VLAN;
6796                 vlan = vlan_tx_tag_get(skb);
6797         }
6798
6799         len = skb_headlen(skb);
6800
6801         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6802         if (pci_dma_mapping_error(tp->pdev, mapping))
6803                 goto drop;
6804
6805
6806         tnapi->tx_buffers[entry].skb = skb;
6807         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6808
6809         would_hit_hwbug = 0;
6810
6811         if (tg3_flag(tp, 5701_DMA_BUG))
6812                 would_hit_hwbug = 1;
6813
6814         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6815                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6816                             mss, vlan)) {
6817                 would_hit_hwbug = 1;
6818         /* Now loop through additional data fragments, and queue them. */
6819         } else if (skb_shinfo(skb)->nr_frags > 0) {
6820                 u32 tmp_mss = mss;
6821
6822                 if (!tg3_flag(tp, HW_TSO_1) &&
6823                     !tg3_flag(tp, HW_TSO_2) &&
6824                     !tg3_flag(tp, HW_TSO_3))
6825                         tmp_mss = 0;
6826
6827                 last = skb_shinfo(skb)->nr_frags - 1;
6828                 for (i = 0; i <= last; i++) {
6829                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6830
6831                         len = skb_frag_size(frag);
6832                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6833                                                    len, DMA_TO_DEVICE);
6834
6835                         tnapi->tx_buffers[entry].skb = NULL;
6836                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6837                                            mapping);
6838                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6839                                 goto dma_error;
6840
6841                         if (!budget ||
6842                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6843                                             len, base_flags |
6844                                             ((i == last) ? TXD_FLAG_END : 0),
6845                                             tmp_mss, vlan)) {
6846                                 would_hit_hwbug = 1;
6847                                 break;
6848                         }
6849                 }
6850         }
6851
6852         if (would_hit_hwbug) {
6853                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6854
6855                 /* If the workaround fails due to memory/mapping
6856                  * failure, silently drop this packet.
6857                  */
6858                 entry = tnapi->tx_prod;
6859                 budget = tg3_tx_avail(tnapi);
6860                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6861                                                 base_flags, mss, vlan))
6862                         goto drop_nofree;
6863         }
6864
6865         skb_tx_timestamp(skb);
6866
6867         /* Packets are ready, update Tx producer idx local and on card. */
6868         tw32_tx_mbox(tnapi->prodmbox, entry);
6869
6870         tnapi->tx_prod = entry;
6871         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6872                 netif_tx_stop_queue(txq);
6873
6874                 /* netif_tx_stop_queue() must be done before checking
6875                  * tx index in tg3_tx_avail() below, because in
6876                  * tg3_tx(), we update tx index before checking for
6877                  * netif_tx_queue_stopped().
6878                  */
6879                 smp_mb();
6880                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6881                         netif_tx_wake_queue(txq);
6882         }
6883
6884         mmiowb();
6885         return NETDEV_TX_OK;
6886
6887 dma_error:
6888         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6889         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6890 drop:
6891         dev_kfree_skb(skb);
6892 drop_nofree:
6893         tp->tx_dropped++;
6894         return NETDEV_TX_OK;
6895 }
6896
6897 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6898 {
6899         if (enable) {
6900                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6901                                   MAC_MODE_PORT_MODE_MASK);
6902
6903                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6904
6905                 if (!tg3_flag(tp, 5705_PLUS))
6906                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6907
6908                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6909                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6910                 else
6911                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6912         } else {
6913                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6914
6915                 if (tg3_flag(tp, 5705_PLUS) ||
6916                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6917                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6918                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6919         }
6920
6921         tw32(MAC_MODE, tp->mac_mode);
6922         udelay(40);
6923 }
6924
6925 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6926 {
6927         u32 val, bmcr, mac_mode, ptest = 0;
6928
6929         tg3_phy_toggle_apd(tp, false);
6930         tg3_phy_toggle_automdix(tp, 0);
6931
6932         if (extlpbk && tg3_phy_set_extloopbk(tp))
6933                 return -EIO;
6934
6935         bmcr = BMCR_FULLDPLX;
6936         switch (speed) {
6937         case SPEED_10:
6938                 break;
6939         case SPEED_100:
6940                 bmcr |= BMCR_SPEED100;
6941                 break;
6942         case SPEED_1000:
6943         default:
6944                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6945                         speed = SPEED_100;
6946                         bmcr |= BMCR_SPEED100;
6947                 } else {
6948                         speed = SPEED_1000;
6949                         bmcr |= BMCR_SPEED1000;
6950                 }
6951         }
6952
6953         if (extlpbk) {
6954                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6955                         tg3_readphy(tp, MII_CTRL1000, &val);
6956                         val |= CTL1000_AS_MASTER |
6957                                CTL1000_ENABLE_MASTER;
6958                         tg3_writephy(tp, MII_CTRL1000, val);
6959                 } else {
6960                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6961                                 MII_TG3_FET_PTEST_TRIM_2;
6962                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6963                 }
6964         } else
6965                 bmcr |= BMCR_LOOPBACK;
6966
6967         tg3_writephy(tp, MII_BMCR, bmcr);
6968
6969         /* The write needs to be flushed for the FETs */
6970         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6971                 tg3_readphy(tp, MII_BMCR, &bmcr);
6972
6973         udelay(40);
6974
6975         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6977                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6978                              MII_TG3_FET_PTEST_FRC_TX_LINK |
6979                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
6980
6981                 /* The write needs to be flushed for the AC131 */
6982                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6983         }
6984
6985         /* Reset to prevent losing 1st rx packet intermittently */
6986         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6987             tg3_flag(tp, 5780_CLASS)) {
6988                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6989                 udelay(10);
6990                 tw32_f(MAC_RX_MODE, tp->rx_mode);
6991         }
6992
6993         mac_mode = tp->mac_mode &
6994                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6995         if (speed == SPEED_1000)
6996                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6997         else
6998                 mac_mode |= MAC_MODE_PORT_MODE_MII;
6999
7000         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7001                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7002
7003                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7004                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7005                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7006                         mac_mode |= MAC_MODE_LINK_POLARITY;
7007
7008                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7009                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7010         }
7011
7012         tw32(MAC_MODE, mac_mode);
7013         udelay(40);
7014
7015         return 0;
7016 }
7017
7018 static void tg3_set_loopback(struct net_device *dev, u32 features)
7019 {
7020         struct tg3 *tp = netdev_priv(dev);
7021
7022         if (features & NETIF_F_LOOPBACK) {
7023                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7024                         return;
7025
7026                 spin_lock_bh(&tp->lock);
7027                 tg3_mac_loopback(tp, true);
7028                 netif_carrier_on(tp->dev);
7029                 spin_unlock_bh(&tp->lock);
7030                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7031         } else {
7032                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7033                         return;
7034
7035                 spin_lock_bh(&tp->lock);
7036                 tg3_mac_loopback(tp, false);
7037                 /* Force link status check */
7038                 tg3_setup_phy(tp, 1);
7039                 spin_unlock_bh(&tp->lock);
7040                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7041         }
7042 }
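
/* Usage sketch for the handler above (illustrative only, and assuming
 * the standard "loopback" ethtool feature string for NETIF_F_LOOPBACK):
 *
 *   # ethtool -K eth0 loopback on
 *
 * The request arrives through the ndo_set_features hook, which calls
 * tg3_set_loopback() with NETIF_F_LOOPBACK set in the new feature mask.
 */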
7043
7044 static u32 tg3_fix_features(struct net_device *dev, u32 features)
7045 {
7046         struct tg3 *tp = netdev_priv(dev);
7047
7048         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7049                 features &= ~NETIF_F_ALL_TSO;
7050
7051         return features;
7052 }
7053
7054 static int tg3_set_features(struct net_device *dev, u32 features)
7055 {
7056         u32 changed = dev->features ^ features;
7057
7058         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7059                 tg3_set_loopback(dev, features);
7060
7061         return 0;
7062 }
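
/* The XOR in tg3_set_features() is the usual feature-diff idiom: it
 * keeps exactly the bits that toggled between the old and new masks.
 * Minimal standalone sketch of the idiom (not driver code):
 */
#if 0
static bool loopback_toggled(u32 old_features, u32 new_features)
{
        u32 changed = old_features ^ new_features;

        /* Non-zero iff NETIF_F_LOOPBACK changed state in either direction */
        return (changed & NETIF_F_LOOPBACK) != 0;
}
#endif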
7063
7064 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7065                                int new_mtu)
7066 {
7067         dev->mtu = new_mtu;
7068
7069         if (new_mtu > ETH_DATA_LEN) {
7070                 if (tg3_flag(tp, 5780_CLASS)) {
7071                         netdev_update_features(dev);
7072                         tg3_flag_clear(tp, TSO_CAPABLE);
7073                 } else {
7074                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
7075                 }
7076         } else {
7077                 if (tg3_flag(tp, 5780_CLASS)) {
7078                         tg3_flag_set(tp, TSO_CAPABLE);
7079                         netdev_update_features(dev);
7080                 }
7081                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7082         }
7083 }
7084
7085 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7086 {
7087         struct tg3 *tp = netdev_priv(dev);
7088         int err;
7089
7090         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7091                 return -EINVAL;
7092
7093         if (!netif_running(dev)) {
7094                 /* We'll just catch it later when the
7095                  * device is brought up.
7096                  */
7097                 tg3_set_mtu(dev, tp, new_mtu);
7098                 return 0;
7099         }
7100
7101         tg3_phy_stop(tp);
7102
7103         tg3_netif_stop(tp);
7104
7105         tg3_set_mtu(dev, tp, new_mtu);
7106
7107         tg3_full_lock(tp, 1);
7108
7109         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7110
7111         err = tg3_restart_hw(tp, 0);
7112
7113         if (!err)
7114                 tg3_netif_start(tp);
7115
7116         tg3_full_unlock(tp);
7117
7118         if (!err)
7119                 tg3_phy_start(tp);
7120
7121         return err;
7122 }
7123
7124 static void tg3_rx_prodring_free(struct tg3 *tp,
7125                                  struct tg3_rx_prodring_set *tpr)
7126 {
7127         int i;
7128
7129         if (tpr != &tp->napi[0].prodring) {
7130                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7131                      i = (i + 1) & tp->rx_std_ring_mask)
7132                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7133                                         tp->rx_pkt_map_sz);
7134
7135                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7136                         for (i = tpr->rx_jmb_cons_idx;
7137                              i != tpr->rx_jmb_prod_idx;
7138                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7139                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7140                                                 TG3_RX_JMB_MAP_SZ);
7141                         }
7142                 }
7143
7144                 return;
7145         }
7146
7147         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7148                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7149                                 tp->rx_pkt_map_sz);
7150
7151         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7152                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7153                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7154                                         TG3_RX_JMB_MAP_SZ);
7155         }
7156 }
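
/* The cons -> prod walks above rely on the producer rings being
 * power-of-two sized, so wrap-around is a mask instead of a modulo.
 * Standalone sketch (the mask value is hypothetical):
 */
#if 0
static u32 ring_advance(u32 idx, u32 ring_mask)
{
        /* e.g. ring_mask = 511 for a 512-entry ring: index 511 wraps to 0 */
        return (idx + 1) & ring_mask;
}
#endif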
7157
7158 /* Initialize rx rings for packet processing.
7159  *
7160  * The chip has been shut down and the driver detached from
7161  * the networking stack, so no interrupts or new tx packets will
7162  * end up in the driver.  tp->{tx,}lock are held and thus
7163  * we may not sleep.
7164  */
7165 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7166                                  struct tg3_rx_prodring_set *tpr)
7167 {
7168         u32 i, rx_pkt_dma_sz;
7169
7170         tpr->rx_std_cons_idx = 0;
7171         tpr->rx_std_prod_idx = 0;
7172         tpr->rx_jmb_cons_idx = 0;
7173         tpr->rx_jmb_prod_idx = 0;
7174
7175         if (tpr != &tp->napi[0].prodring) {
7176                 memset(&tpr->rx_std_buffers[0], 0,
7177                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7178                 if (tpr->rx_jmb_buffers)
7179                         memset(&tpr->rx_jmb_buffers[0], 0,
7180                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7181                 goto done;
7182         }
7183
7184         /* Zero out all descriptors. */
7185         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7186
7187         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7188         if (tg3_flag(tp, 5780_CLASS) &&
7189             tp->dev->mtu > ETH_DATA_LEN)
7190                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7191         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7192
7193         /* Initialize invariants of the rings; we only set this
7194          * stuff once.  This works because the card does not
7195          * write into the rx buffer posting rings.
7196          */
7197         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7198                 struct tg3_rx_buffer_desc *rxd;
7199
7200                 rxd = &tpr->rx_std[i];
7201                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7202                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7203                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7204                                (i << RXD_OPAQUE_INDEX_SHIFT));
7205         }
7206
7207         /* Now allocate fresh SKBs for each rx ring. */
7208         for (i = 0; i < tp->rx_pending; i++) {
7209                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7210                         netdev_warn(tp->dev,
7211                                     "Using a smaller RX standard ring. Only "
7212                                     "%d out of %d buffers were allocated "
7213                                     "successfully\n", i, tp->rx_pending);
7214                         if (i == 0)
7215                                 goto initfail;
7216                         tp->rx_pending = i;
7217                         break;
7218                 }
7219         }
7220
7221         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7222                 goto done;
7223
7224         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7225
7226         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7227                 goto done;
7228
7229         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7230                 struct tg3_rx_buffer_desc *rxd;
7231
7232                 rxd = &tpr->rx_jmb[i].std;
7233                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7234                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7235                                   RXD_FLAG_JUMBO;
7236                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7237                        (i << RXD_OPAQUE_INDEX_SHIFT));
7238         }
7239
7240         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7241                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7242                         netdev_warn(tp->dev,
7243                                     "Using a smaller RX jumbo ring. Only %d "
7244                                     "out of %d buffers were allocated "
7245                                     "successfully\n", i, tp->rx_jumbo_pending);
7246                         if (i == 0)
7247                                 goto initfail;
7248                         tp->rx_jumbo_pending = i;
7249                         break;
7250                 }
7251         }
7252
7253 done:
7254         return 0;
7255
7256 initfail:
7257         tg3_rx_prodring_free(tp, tpr);
7258         return -ENOMEM;
7259 }
7260
7261 static void tg3_rx_prodring_fini(struct tg3 *tp,
7262                                  struct tg3_rx_prodring_set *tpr)
7263 {
7264         kfree(tpr->rx_std_buffers);
7265         tpr->rx_std_buffers = NULL;
7266         kfree(tpr->rx_jmb_buffers);
7267         tpr->rx_jmb_buffers = NULL;
7268         if (tpr->rx_std) {
7269                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7270                                   tpr->rx_std, tpr->rx_std_mapping);
7271                 tpr->rx_std = NULL;
7272         }
7273         if (tpr->rx_jmb) {
7274                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7275                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7276                 tpr->rx_jmb = NULL;
7277         }
7278 }
7279
7280 static int tg3_rx_prodring_init(struct tg3 *tp,
7281                                 struct tg3_rx_prodring_set *tpr)
7282 {
7283         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7284                                       GFP_KERNEL);
7285         if (!tpr->rx_std_buffers)
7286                 return -ENOMEM;
7287
7288         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7289                                          TG3_RX_STD_RING_BYTES(tp),
7290                                          &tpr->rx_std_mapping,
7291                                          GFP_KERNEL);
7292         if (!tpr->rx_std)
7293                 goto err_out;
7294
7295         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7296                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7297                                               GFP_KERNEL);
7298                 if (!tpr->rx_jmb_buffers)
7299                         goto err_out;
7300
7301                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7302                                                  TG3_RX_JMB_RING_BYTES(tp),
7303                                                  &tpr->rx_jmb_mapping,
7304                                                  GFP_KERNEL);
7305                 if (!tpr->rx_jmb)
7306                         goto err_out;
7307         }
7308
7309         return 0;
7310
7311 err_out:
7312         tg3_rx_prodring_fini(tp, tpr);
7313         return -ENOMEM;
7314 }
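
/* tg3_rx_prodring_init() uses the standard kernel unwind idiom: every
 * allocation failure jumps to a single cleanup label, and the teardown
 * helper (tg3_rx_prodring_fini() here) tolerates NULL members, so a
 * partially built set is safe to hand back to it.  Generic shape of
 * the idiom, as a sketch with hypothetical buffers:
 */
#if 0
static int alloc_pair(void **a, void **b)
{
        *a = kzalloc(64, GFP_KERNEL);
        if (!*a)
                goto err_out;

        *b = kzalloc(64, GFP_KERNEL);
        if (!*b)
                goto err_out;

        return 0;

err_out:
        kfree(*a);              /* kfree(NULL) is a no-op, like the fini */
        *a = NULL;
        return -ENOMEM;
}
#endif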
7315
7316 /* Free up pending packets in all rx/tx rings.
7317  *
7318  * The chip has been shut down and the driver detached from
7319  * the networking stack, so no interrupts or new tx packets will
7320  * end up in the driver.  tp->{tx,}lock is not held and we are not
7321  * in an interrupt context and thus may sleep.
7322  */
7323 static void tg3_free_rings(struct tg3 *tp)
7324 {
7325         int i, j;
7326
7327         for (j = 0; j < tp->irq_cnt; j++) {
7328                 struct tg3_napi *tnapi = &tp->napi[j];
7329
7330                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7331
7332                 if (!tnapi->tx_buffers)
7333                         continue;
7334
7335                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7336                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7337
7338                         if (!skb)
7339                                 continue;
7340
7341                         tg3_tx_skb_unmap(tnapi, i,
7342                                          skb_shinfo(skb)->nr_frags - 1);
7343
7344                         dev_kfree_skb_any(skb);
7345                 }
7346         }
7347 }
7348
7349 /* Initialize tx/rx rings for packet processing.
7350  *
7351  * The chip has been shut down and the driver detached from
7352  * the networking stack, so no interrupts or new tx packets will
7353  * end up in the driver.  tp->{tx,}lock are held and thus
7354  * we may not sleep.
7355  */
7356 static int tg3_init_rings(struct tg3 *tp)
7357 {
7358         int i;
7359
7360         /* Free up all the SKBs. */
7361         tg3_free_rings(tp);
7362
7363         for (i = 0; i < tp->irq_cnt; i++) {
7364                 struct tg3_napi *tnapi = &tp->napi[i];
7365
7366                 tnapi->last_tag = 0;
7367                 tnapi->last_irq_tag = 0;
7368                 tnapi->hw_status->status = 0;
7369                 tnapi->hw_status->status_tag = 0;
7370                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7371
7372                 tnapi->tx_prod = 0;
7373                 tnapi->tx_cons = 0;
7374                 if (tnapi->tx_ring)
7375                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7376
7377                 tnapi->rx_rcb_ptr = 0;
7378                 if (tnapi->rx_rcb)
7379                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7380
7381                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7382                         tg3_free_rings(tp);
7383                         return -ENOMEM;
7384                 }
7385         }
7386
7387         return 0;
7388 }
7389
7390 /*
7391  * Must not be invoked with interrupt sources disabled and
7392  * the hardware shut down.
7393  */
7394 static void tg3_free_consistent(struct tg3 *tp)
7395 {
7396         int i;
7397
7398         for (i = 0; i < tp->irq_cnt; i++) {
7399                 struct tg3_napi *tnapi = &tp->napi[i];
7400
7401                 if (tnapi->tx_ring) {
7402                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7403                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7404                         tnapi->tx_ring = NULL;
7405                 }
7406
7407                 kfree(tnapi->tx_buffers);
7408                 tnapi->tx_buffers = NULL;
7409
7410                 if (tnapi->rx_rcb) {
7411                         dma_free_coherent(&tp->pdev->dev,
7412                                           TG3_RX_RCB_RING_BYTES(tp),
7413                                           tnapi->rx_rcb,
7414                                           tnapi->rx_rcb_mapping);
7415                         tnapi->rx_rcb = NULL;
7416                 }
7417
7418                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7419
7420                 if (tnapi->hw_status) {
7421                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7422                                           tnapi->hw_status,
7423                                           tnapi->status_mapping);
7424                         tnapi->hw_status = NULL;
7425                 }
7426         }
7427
7428         if (tp->hw_stats) {
7429                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7430                                   tp->hw_stats, tp->stats_mapping);
7431                 tp->hw_stats = NULL;
7432         }
7433 }
7434
7435 /*
7436  * Must not be invoked with interrupt sources disabled and
7437  * the hardware shut down.  Can sleep.
7438  */
7439 static int tg3_alloc_consistent(struct tg3 *tp)
7440 {
7441         int i;
7442
7443         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7444                                           sizeof(struct tg3_hw_stats),
7445                                           &tp->stats_mapping,
7446                                           GFP_KERNEL);
7447         if (!tp->hw_stats)
7448                 goto err_out;
7449
7450         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7451
7452         for (i = 0; i < tp->irq_cnt; i++) {
7453                 struct tg3_napi *tnapi = &tp->napi[i];
7454                 struct tg3_hw_status *sblk;
7455
7456                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7457                                                       TG3_HW_STATUS_SIZE,
7458                                                       &tnapi->status_mapping,
7459                                                       GFP_KERNEL);
7460                 if (!tnapi->hw_status)
7461                         goto err_out;
7462
7463                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7464                 sblk = tnapi->hw_status;
7465
7466                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7467                         goto err_out;
7468
7469                 /* If multivector TSS is enabled, vector 0 does not handle
7470                  * tx interrupts.  Don't allocate any resources for it.
7471                  */
7472                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7473                     (i && tg3_flag(tp, ENABLE_TSS))) {
7474                         tnapi->tx_buffers = kzalloc(
7475                                                sizeof(struct tg3_tx_ring_info) *
7476                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7477                         if (!tnapi->tx_buffers)
7478                                 goto err_out;
7479
7480                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7481                                                             TG3_TX_RING_BYTES,
7482                                                         &tnapi->tx_desc_mapping,
7483                                                             GFP_KERNEL);
7484                         if (!tnapi->tx_ring)
7485                                 goto err_out;
7486                 }
7487
7488                 /*
7489                  * When RSS is enabled, the status block format changes
7490                  * slightly.  The "rx_jumbo_consumer", "reserved",
7491                  * and "rx_mini_consumer" members get mapped to the
7492                  * other three rx return ring producer indexes.
7493                  */
7494                 switch (i) {
7495                 default:
7496                         if (tg3_flag(tp, ENABLE_RSS)) {
7497                                 tnapi->rx_rcb_prod_idx = NULL;
7498                                 break;
7499                         }
7500                         /* Fall through */
7501                 case 1:
7502                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7503                         break;
7504                 case 2:
7505                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7506                         break;
7507                 case 3:
7508                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7509                         break;
7510                 case 4:
7511                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7512                         break;
7513                 }
7514
7515                 /*
7516                  * If multivector RSS is enabled, vector 0 does not handle
7517                  * rx or tx interrupts.  Don't allocate any resources for it.
7518                  */
7519                 if (!i && tg3_flag(tp, ENABLE_RSS))
7520                         continue;
7521
7522                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7523                                                    TG3_RX_RCB_RING_BYTES(tp),
7524                                                    &tnapi->rx_rcb_mapping,
7525                                                    GFP_KERNEL);
7526                 if (!tnapi->rx_rcb)
7527                         goto err_out;
7528
7529                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7530         }
7531
7532         return 0;
7533
7534 err_out:
7535         tg3_free_consistent(tp);
7536         return -ENOMEM;
7537 }
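
/* The switch above encodes a fixed vector -> status block word mapping
 * for RSS:
 *
 *   vector 1 -> idx[0].rx_producer    (the normal rx producer index)
 *   vector 2 -> rx_jumbo_consumer     (reused as rx return prod 2)
 *   vector 3 -> reserved              (reused as rx return prod 3)
 *   vector 4 -> rx_mini_consumer      (reused as rx return prod 4)
 *   vector 0 -> none when RSS is on (it handles no rx work at all)
 */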
7538
7539 #define MAX_WAIT_CNT 1000
7540
7541 /* To stop a block, clear the enable bit and poll till it
7542  * clears.  tp->lock is held.
7543  */
7544 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7545 {
7546         unsigned int i;
7547         u32 val;
7548
7549         if (tg3_flag(tp, 5705_PLUS)) {
7550                 switch (ofs) {
7551                 case RCVLSC_MODE:
7552                 case DMAC_MODE:
7553                 case MBFREE_MODE:
7554                 case BUFMGR_MODE:
7555                 case MEMARB_MODE:
7556                         /* We can't enable/disable these bits on the
7557                          * 5705/5750, so just report success.
7558                          */
7559                         return 0;
7560
7561                 default:
7562                         break;
7563                 }
7564         }
7565
7566         val = tr32(ofs);
7567         val &= ~enable_bit;
7568         tw32_f(ofs, val);
7569
7570         for (i = 0; i < MAX_WAIT_CNT; i++) {
7571                 udelay(100);
7572                 val = tr32(ofs);
7573                 if ((val & enable_bit) == 0)
7574                         break;
7575         }
7576
7577         if (i == MAX_WAIT_CNT && !silent) {
7578                 dev_err(&tp->pdev->dev,
7579                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7580                         ofs, enable_bit);
7581                 return -ENODEV;
7582         }
7583
7584         return 0;
7585 }
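
/* The poll above is bounded at MAX_WAIT_CNT iterations of udelay(100),
 * i.e. 1000 * 100us = 100ms worst case per block.  Generic shape of
 * the clear-and-poll pattern as a standalone sketch (reg is a
 * hypothetical register; readl/writel stand in for tr32/tw32_f):
 */
#if 0
static int stop_block_sketch(void __iomem *reg, u32 enable_bit)
{
        unsigned int i;

        writel(readl(reg) & ~enable_bit, reg);  /* request the stop */

        for (i = 0; i < 1000; i++) {            /* <= 100 ms total */
                udelay(100);
                if (!(readl(reg) & enable_bit))
                        return 0;               /* block acknowledged */
        }

        return -ENODEV;                         /* hardware wedged */
}
#endif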
7586
7587 /* tp->lock is held. */
7588 static int tg3_abort_hw(struct tg3 *tp, int silent)
7589 {
7590         int i, err;
7591
7592         tg3_disable_ints(tp);
7593
7594         tp->rx_mode &= ~RX_MODE_ENABLE;
7595         tw32_f(MAC_RX_MODE, tp->rx_mode);
7596         udelay(10);
7597
7598         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7599         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7600         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7601         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7602         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7603         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7604
7605         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7606         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7607         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7608         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7609         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7610         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7611         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7612
7613         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7614         tw32_f(MAC_MODE, tp->mac_mode);
7615         udelay(40);
7616
7617         tp->tx_mode &= ~TX_MODE_ENABLE;
7618         tw32_f(MAC_TX_MODE, tp->tx_mode);
7619
7620         for (i = 0; i < MAX_WAIT_CNT; i++) {
7621                 udelay(100);
7622                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7623                         break;
7624         }
7625         if (i >= MAX_WAIT_CNT) {
7626                 dev_err(&tp->pdev->dev,
7627                         "%s timed out, TX_MODE_ENABLE will not clear "
7628                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7629                 err |= -ENODEV;
7630         }
7631
7632         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7633         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7634         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7635
7636         tw32(FTQ_RESET, 0xffffffff);
7637         tw32(FTQ_RESET, 0x00000000);
7638
7639         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7640         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7641
7642         for (i = 0; i < tp->irq_cnt; i++) {
7643                 struct tg3_napi *tnapi = &tp->napi[i];
7644                 if (tnapi->hw_status)
7645                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7646         }
7647         if (tp->hw_stats)
7648                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7649
7650         return err;
7651 }
7652
7653 /* Save PCI command register before chip reset */
7654 static void tg3_save_pci_state(struct tg3 *tp)
7655 {
7656         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7657 }
7658
7659 /* Restore PCI state after chip reset */
7660 static void tg3_restore_pci_state(struct tg3 *tp)
7661 {
7662         u32 val;
7663
7664         /* Re-enable indirect register accesses. */
7665         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7666                                tp->misc_host_ctrl);
7667
7668         /* Set MAX PCI retry to zero. */
7669         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7670         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7671             tg3_flag(tp, PCIX_MODE))
7672                 val |= PCISTATE_RETRY_SAME_DMA;
7673         /* Allow reads and writes to the APE register and memory space. */
7674         if (tg3_flag(tp, ENABLE_APE))
7675                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7676                        PCISTATE_ALLOW_APE_SHMEM_WR |
7677                        PCISTATE_ALLOW_APE_PSPACE_WR;
7678         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7679
7680         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7681
7682         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7683                 if (tg3_flag(tp, PCI_EXPRESS))
7684                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7685                 else {
7686                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7687                                               tp->pci_cacheline_sz);
7688                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7689                                               tp->pci_lat_timer);
7690                 }
7691         }
7692
7693         /* Make sure PCI-X relaxed ordering bit is clear. */
7694         if (tg3_flag(tp, PCIX_MODE)) {
7695                 u16 pcix_cmd;
7696
7697                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7698                                      &pcix_cmd);
7699                 pcix_cmd &= ~PCI_X_CMD_ERO;
7700                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7701                                       pcix_cmd);
7702         }
7703
7704         if (tg3_flag(tp, 5780_CLASS)) {
7705
7706                 /* Chip reset on 5780 will reset MSI enable bit,
7707                 /* Chip reset on 5780 will reset the MSI enable bit,
7708                  * so we need to restore it.
7709                 if (tg3_flag(tp, USING_MSI)) {
7710                         u16 ctrl;
7711
7712                         pci_read_config_word(tp->pdev,
7713                                              tp->msi_cap + PCI_MSI_FLAGS,
7714                                              &ctrl);
7715                         pci_write_config_word(tp->pdev,
7716                                               tp->msi_cap + PCI_MSI_FLAGS,
7717                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7718                         val = tr32(MSGINT_MODE);
7719                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7720                 }
7721         }
7722 }
7723
7724 /* tp->lock is held. */
7725 static int tg3_chip_reset(struct tg3 *tp)
7726 {
7727         u32 val;
7728         void (*write_op)(struct tg3 *, u32, u32);
7729         int i, err;
7730
7731         tg3_nvram_lock(tp);
7732
7733         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7734
7735         /* No matching tg3_nvram_unlock() after this because
7736          * chip reset below will undo the nvram lock.
7737          */
7738         tp->nvram_lock_cnt = 0;
7739
7740         /* GRC_MISC_CFG core clock reset will clear the memory
7741          * enable bit in PCI register 4 and the MSI enable bit
7742          * on some chips, so we save relevant registers here.
7743          */
7744         tg3_save_pci_state(tp);
7745
7746         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7747             tg3_flag(tp, 5755_PLUS))
7748                 tw32(GRC_FASTBOOT_PC, 0);
7749
7750         /*
7751          * We must avoid the readl() that normally takes place.
7752          * It locks up machines, causes machine checks, and does
7753          * other fun things.  So, temporarily disable the 5701
7754          * hardware workaround while we do the reset.
7755          */
7756         write_op = tp->write32;
7757         if (write_op == tg3_write_flush_reg32)
7758                 tp->write32 = tg3_write32;
7759
7760         /* Prevent the irq handler from reading or writing PCI registers
7761          * during chip reset when the memory enable bit in the PCI command
7762          * register may be cleared.  The chip does not generate interrupts
7763          * at this time, but the irq handler may still be called due to irq
7764          * sharing or irqpoll.
7765          */
7766         tg3_flag_set(tp, CHIP_RESETTING);
7767         for (i = 0; i < tp->irq_cnt; i++) {
7768                 struct tg3_napi *tnapi = &tp->napi[i];
7769                 if (tnapi->hw_status) {
7770                         tnapi->hw_status->status = 0;
7771                         tnapi->hw_status->status_tag = 0;
7772                 }
7773                 tnapi->last_tag = 0;
7774                 tnapi->last_irq_tag = 0;
7775         }
7776         smp_mb();
7777
7778         for (i = 0; i < tp->irq_cnt; i++)
7779                 synchronize_irq(tp->napi[i].irq_vec);
7780
7781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7782                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7783                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7784         }
7785
7786         /* do the reset */
7787         val = GRC_MISC_CFG_CORECLK_RESET;
7788
7789         if (tg3_flag(tp, PCI_EXPRESS)) {
7790                 /* Force PCIe 1.0a mode */
7791                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7792                     !tg3_flag(tp, 57765_PLUS) &&
7793                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7794                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7795                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7796
7797                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7798                         tw32(GRC_MISC_CFG, (1 << 29));
7799                         val |= (1 << 29);
7800                 }
7801         }
7802
7803         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7804                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7805                 tw32(GRC_VCPU_EXT_CTRL,
7806                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7807         }
7808
7809         /* Manage gphy power for all CPMU-absent PCIe devices. */
7810         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7811                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7812
7813         tw32(GRC_MISC_CFG, val);
7814
7815         /* restore 5701 hardware bug workaround write method */
7816         tp->write32 = write_op;
7817
7818         /* Unfortunately, we have to delay before the PCI read back.
7819  * Some 575X chips will not even respond to a PCI cfg access
7820          * when the reset command is given to the chip.
7821          *
7822          * How do these hardware designers expect things to work
7823          * properly if the PCI write is posted for a long period
7824          * of time?  It is always necessary to have some method by
7825          * which a register read back can occur to push the write
7826          * out which does the reset.
7827          *
7828          * For most tg3 variants the trick below was working.
7829          * Ho hum...
7830          */
7831         udelay(120);
7832
7833         /* Flush PCI posted writes.  The normal MMIO registers
7834          * are inaccessible at this time, so this is the only
7835          * way to do this reliably (actually, this is no longer
7836          * the case, see above).  I tried to use indirect
7837          * register read/write but this upset some 5701 variants.
7838          */
7839         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7840
7841         udelay(120);
7842
7843         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7844                 u16 val16;
7845
7846                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7847                         int i;
7848                         u32 cfg_val;
7849
7850                         /* Wait for link training to complete.  */
7851                         for (i = 0; i < 5000; i++)
7852                                 udelay(100);
7853
7854                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7855                         pci_write_config_dword(tp->pdev, 0xc4,
7856                                                cfg_val | (1 << 15));
7857                 }
7858
7859                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7860                 pci_read_config_word(tp->pdev,
7861                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7862                                      &val16);
7863                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7864                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7865                 /*
7866                  * Older PCIe devices only support the 128-byte
7867                  * MPS setting.  Enforce the restriction.
7868                  */
7869                 if (!tg3_flag(tp, CPMU_PRESENT))
7870                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7871                 pci_write_config_word(tp->pdev,
7872                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7873                                       val16);
7874
7875                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7876
7877                 /* Clear error status */
7878                 pci_write_config_word(tp->pdev,
7879                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7880                                       PCI_EXP_DEVSTA_CED |
7881                                       PCI_EXP_DEVSTA_NFED |
7882                                       PCI_EXP_DEVSTA_FED |
7883                                       PCI_EXP_DEVSTA_URD);
7884         }
7885
7886         tg3_restore_pci_state(tp);
7887
7888         tg3_flag_clear(tp, CHIP_RESETTING);
7889         tg3_flag_clear(tp, ERROR_PROCESSED);
7890
7891         val = 0;
7892         if (tg3_flag(tp, 5780_CLASS))
7893                 val = tr32(MEMARB_MODE);
7894         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7895
7896         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7897                 tg3_stop_fw(tp);
7898                 tw32(0x5000, 0x400);
7899         }
7900
7901         tw32(GRC_MODE, tp->grc_mode);
7902
7903         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7904                 val = tr32(0xc4);
7905
7906                 tw32(0xc4, val | (1 << 15));
7907         }
7908
7909         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7910             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7911                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7912                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7913                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7914                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7915         }
7916
7917         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7918                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7919                 val = tp->mac_mode;
7920         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7921                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7922                 val = tp->mac_mode;
7923         } else
7924                 val = 0;
7925
7926         tw32_f(MAC_MODE, val);
7927         udelay(40);
7928
7929         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7930
7931         err = tg3_poll_fw(tp);
7932         if (err)
7933                 return err;
7934
7935         tg3_mdio_start(tp);
7936
7937         if (tg3_flag(tp, PCI_EXPRESS) &&
7938             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7939             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7940             !tg3_flag(tp, 57765_PLUS)) {
7941                 val = tr32(0x7c00);
7942
7943                 tw32(0x7c00, val | (1 << 25));
7944         }
7945
7946         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7947                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7948                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7949         }
7950
7951         /* Reprobe ASF enable state.  */
7952         tg3_flag_clear(tp, ENABLE_ASF);
7953         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7954         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7955         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7956                 u32 nic_cfg;
7957
7958                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7959                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7960                         tg3_flag_set(tp, ENABLE_ASF);
7961                         tp->last_event_jiffies = jiffies;
7962                         if (tg3_flag(tp, 5750_PLUS))
7963                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7964                 }
7965         }
7966
7967         return 0;
7968 }
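
/* The config space read in the middle of tg3_chip_reset() is a posted
 * write flush: an MMIO write may sit buffered indefinitely until a read
 * travels the same path and pushes it out.  Since the chip's MMIO
 * window is unusable during reset, a PCI config read is used instead.
 * The pattern in isolation (sketch):
 */
#if 0
static void flush_posted_writes(struct pci_dev *pdev)
{
        u32 dummy;

        /* The read cannot complete until earlier writes have landed */
        pci_read_config_dword(pdev, PCI_COMMAND, &dummy);
}
#endif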
7969
7970 /* tp->lock is held. */
7971 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7972 {
7973         int err;
7974
7975         tg3_stop_fw(tp);
7976
7977         tg3_write_sig_pre_reset(tp, kind);
7978
7979         tg3_abort_hw(tp, silent);
7980         err = tg3_chip_reset(tp);
7981
7982         __tg3_set_mac_addr(tp, 0);
7983
7984         tg3_write_sig_legacy(tp, kind);
7985         tg3_write_sig_post_reset(tp, kind);
7986
7987         if (err)
7988                 return err;
7989
7990         return 0;
7991 }
7992
7993 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7994 {
7995         struct tg3 *tp = netdev_priv(dev);
7996         struct sockaddr *addr = p;
7997         int err = 0, skip_mac_1 = 0;
7998
7999         if (!is_valid_ether_addr(addr->sa_data))
8000                 return -EINVAL;
8001
8002         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8003
8004         if (!netif_running(dev))
8005                 return 0;
8006
8007         if (tg3_flag(tp, ENABLE_ASF)) {
8008                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8009
8010                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8011                 addr0_low = tr32(MAC_ADDR_0_LOW);
8012                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8013                 addr1_low = tr32(MAC_ADDR_1_LOW);
8014
8015                 /* Skip MAC addr 1 if ASF is using it. */
8016                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8017                     !(addr1_high == 0 && addr1_low == 0))
8018                         skip_mac_1 = 1;
8019         }
8020         spin_lock_bh(&tp->lock);
8021         __tg3_set_mac_addr(tp, skip_mac_1);
8022         spin_unlock_bh(&tp->lock);
8023
8024         return err;
8025 }
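
/* The ASF check above preserves MAC address slot 1 whenever management
 * firmware owns it: the slot is skipped only if it is non-zero and
 * differs from slot 0.  The predicate in isolation (sketch):
 */
#if 0
static bool asf_owns_mac1(u32 a0_hi, u32 a0_lo, u32 a1_hi, u32 a1_lo)
{
        return (a0_hi != a1_hi || a0_lo != a1_lo) &&
               !(a1_hi == 0 && a1_lo == 0);
}
#endif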
8026
8027 /* tp->lock is held. */
8028 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8029                            dma_addr_t mapping, u32 maxlen_flags,
8030                            u32 nic_addr)
8031 {
8032         tg3_write_mem(tp,
8033                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8034                       ((u64) mapping >> 32));
8035         tg3_write_mem(tp,
8036                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8037                       ((u64) mapping & 0xffffffff));
8038         tg3_write_mem(tp,
8039                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8040                        maxlen_flags);
8041
8042         if (!tg3_flag(tp, 5705_PLUS))
8043                 tg3_write_mem(tp,
8044                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8045                               nic_addr);
8046 }
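
/* A BDINFO host address is stored as two 32-bit SRAM words, so the
 * 64-bit DMA address is split exactly as above.  Worked example:
 * mapping 0x0000000123456789 puts 0x00000001 in the HIGH word and
 * 0x23456789 in the LOW word.  The split in isolation (sketch):
 */
#if 0
static void split_dma_addr(u64 mapping, u32 *hi, u32 *lo)
{
        *hi = (u32)(mapping >> 32);
        *lo = (u32)(mapping & 0xffffffff);
}
#endif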
8047
8048 static void __tg3_set_rx_mode(struct net_device *);
8049 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8050 {
8051         int i;
8052
8053         if (!tg3_flag(tp, ENABLE_TSS)) {
8054                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8055                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8056                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8057         } else {
8058                 tw32(HOSTCC_TXCOL_TICKS, 0);
8059                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8060                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8061         }
8062
8063         if (!tg3_flag(tp, ENABLE_RSS)) {
8064                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8065                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8066                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8067         } else {
8068                 tw32(HOSTCC_RXCOL_TICKS, 0);
8069                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8070                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8071         }
8072
8073         if (!tg3_flag(tp, 5705_PLUS)) {
8074                 u32 val = ec->stats_block_coalesce_usecs;
8075
8076                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8077                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8078
8079                 if (!netif_carrier_ok(tp->dev))
8080                         val = 0;
8081
8082                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8083         }
8084
8085         for (i = 0; i < tp->irq_cnt - 1; i++) {
8086                 u32 reg;
8087
8088                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8089                 tw32(reg, ec->rx_coalesce_usecs);
8090                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8091                 tw32(reg, ec->rx_max_coalesced_frames);
8092                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8093                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8094
8095                 if (tg3_flag(tp, ENABLE_TSS)) {
8096                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8097                         tw32(reg, ec->tx_coalesce_usecs);
8098                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8099                         tw32(reg, ec->tx_max_coalesced_frames);
8100                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8101                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8102                 }
8103         }
8104
8105         for (; i < tp->irq_max - 1; i++) {
8106                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8107                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8108                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8109
8110                 if (tg3_flag(tp, ENABLE_TSS)) {
8111                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8112                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8113                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8114                 }
8115         }
8116 }
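
/* The per-vector host coalescing registers walked above sit at a fixed
 * 0x18-byte stride starting at the VEC1 block, so vector i + 1 lives at
 * base + i * 0x18.  The addressing in isolation (sketch):
 */
#if 0
static u32 coal_vec_reg(u32 vec1_base, int i)
{
        /* i == 0 addresses VEC1, i == 1 addresses VEC2, and so on */
        return vec1_base + i * 0x18;
}
#endif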
8117
8118 /* tp->lock is held. */
8119 static void tg3_rings_reset(struct tg3 *tp)
8120 {
8121         int i;
8122         u32 stblk, txrcb, rxrcb, limit;
8123         struct tg3_napi *tnapi = &tp->napi[0];
8124
8125         /* Disable all transmit rings but the first. */
8126         if (!tg3_flag(tp, 5705_PLUS))
8127                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8128         else if (tg3_flag(tp, 5717_PLUS))
8129                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8130         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8131                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8132         else
8133                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8134
8135         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8136              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8137                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8138                               BDINFO_FLAGS_DISABLED);
8139
8140
8141         /* Disable all receive return rings but the first. */
8142         if (tg3_flag(tp, 5717_PLUS))
8143                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8144         else if (!tg3_flag(tp, 5705_PLUS))
8145                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8146         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8147                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8148                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8149         else
8150                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8151
8152         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8153              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8154                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8155                               BDINFO_FLAGS_DISABLED);
8156
8157         /* Disable interrupts */
8158         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8159         tp->napi[0].chk_msi_cnt = 0;
8160         tp->napi[0].last_rx_cons = 0;
8161         tp->napi[0].last_tx_cons = 0;
8162
8163         /* Zero mailbox registers. */
8164         if (tg3_flag(tp, SUPPORT_MSIX)) {
8165                 for (i = 1; i < tp->irq_max; i++) {
8166                         tp->napi[i].tx_prod = 0;
8167                         tp->napi[i].tx_cons = 0;
8168                         if (tg3_flag(tp, ENABLE_TSS))
8169                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8170                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8171                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8172                         tp->napi[i].chk_msi_cnt = 0;
8173                         tp->napi[i].last_rx_cons = 0;
8174                         tp->napi[i].last_tx_cons = 0;
8175                 }
8176                 if (!tg3_flag(tp, ENABLE_TSS))
8177                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8178         } else {
8179                 tp->napi[0].tx_prod = 0;
8180                 tp->napi[0].tx_cons = 0;
8181                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8182                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8183         }
8184
8185         /* Make sure the NIC-based send BD rings are disabled. */
8186         if (!tg3_flag(tp, 5705_PLUS)) {
8187                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8188                 for (i = 0; i < 16; i++)
8189                         tw32_tx_mbox(mbox + i * 8, 0);
8190         }
8191
8192         txrcb = NIC_SRAM_SEND_RCB;
8193         rxrcb = NIC_SRAM_RCV_RET_RCB;
8194
8195         /* Clear status block in ram. */
8196         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8197
8198         /* Set status block DMA address */
8199         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8200              ((u64) tnapi->status_mapping >> 32));
8201         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8202              ((u64) tnapi->status_mapping & 0xffffffff));
8203
8204         if (tnapi->tx_ring) {
8205                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8206                                (TG3_TX_RING_SIZE <<
8207                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8208                                NIC_SRAM_TX_BUFFER_DESC);
8209                 txrcb += TG3_BDINFO_SIZE;
8210         }
8211
8212         if (tnapi->rx_rcb) {
8213                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8214                                (tp->rx_ret_ring_mask + 1) <<
8215                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8216                 rxrcb += TG3_BDINFO_SIZE;
8217         }
8218
8219         stblk = HOSTCC_STATBLCK_RING1;
8220
8221         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8222                 u64 mapping = (u64)tnapi->status_mapping;
8223                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8224                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8225
8226                 /* Clear status block in ram. */
8227                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8228
8229                 if (tnapi->tx_ring) {
8230                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8231                                        (TG3_TX_RING_SIZE <<
8232                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8233                                        NIC_SRAM_TX_BUFFER_DESC);
8234                         txrcb += TG3_BDINFO_SIZE;
8235                 }
8236
8237                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8238                                ((tp->rx_ret_ring_mask + 1) <<
8239                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8240
8241                 stblk += 8;
8242                 rxrcb += TG3_BDINFO_SIZE;
8243         }
8244 }
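
/* Two register strides appear in the reset above: the 16 NIC-based
 * send BD mailboxes sit 8 bytes apart (mbox + i * 8), and the extra
 * per-vector status block address registers also advance by 8
 * (stblk += 8).  Mailbox addressing in isolation (sketch):
 */
#if 0
static u32 sndnic_mbox_low(u32 mbox0_low, int i)
{
        return mbox0_low + i * 8;       /* i = 0..15 */
}
#endif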
8245
8246 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8247 {
8248         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8249
8250         if (!tg3_flag(tp, 5750_PLUS) ||
8251             tg3_flag(tp, 5780_CLASS) ||
8252             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8253             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8254                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8255         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8256                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8257                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8258         else
8259                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8260
8261         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8262         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8263
8264         val = min(nic_rep_thresh, host_rep_thresh);
8265         tw32(RCVBDI_STD_THRESH, val);
8266
8267         if (tg3_flag(tp, 57765_PLUS))
8268                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8269
8270         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8271                 return;
8272
8273         if (!tg3_flag(tp, 5705_PLUS))
8274                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8275         else
8276                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8277
8278         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8279
8280         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8281         tw32(RCVBDI_JUMBO_THRESH, val);
8282
8283         if (tg3_flag(tp, 57765_PLUS))
8284                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8285 }
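
/* Worked example for the standard ring threshold above (all values
 * hypothetical): with bdcache_maxcnt = 64, rx_std_max_post = 32 and
 * rx_pending = 200, nic_rep_thresh = min(64 / 2, 32) = 32,
 * host_rep_thresh = max(200 / 8, 1) = 25, and the register gets
 * min(32, 25) = 25.  The same computation in isolation:
 */
#if 0
static u32 std_thresh_sketch(u32 bdcache_maxcnt, u32 rx_std_max_post,
                             u32 rx_pending)
{
        u32 nic = min(bdcache_maxcnt / 2, rx_std_max_post);
        u32 host = max_t(u32, rx_pending / 8, 1);

        return min(nic, host);
}
#endif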
8286
8287 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
8288 {
8289         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8290                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
8291         else
8292                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
8293 }
8294
8295 /* tp->lock is held. */
8296 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8297 {
8298         u32 val, rdmac_mode;
8299         int i, err, limit;
8300         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8301
8302         tg3_disable_ints(tp);
8303
8304         tg3_stop_fw(tp);
8305
8306         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8307
8308         if (tg3_flag(tp, INIT_COMPLETE))
8309                 tg3_abort_hw(tp, 1);
8310
8311         /* Enable MAC control of LPI */
8312         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8313                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8314                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8315                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8316
8317                 tw32_f(TG3_CPMU_EEE_CTRL,
8318                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8319
8320                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8321                       TG3_CPMU_EEEMD_LPI_IN_TX |
8322                       TG3_CPMU_EEEMD_LPI_IN_RX |
8323                       TG3_CPMU_EEEMD_EEE_ENABLE;
8324
8325                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8326                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8327
8328                 if (tg3_flag(tp, ENABLE_APE))
8329                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8330
8331                 tw32_f(TG3_CPMU_EEE_MODE, val);
8332
8333                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8334                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8335                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8336
8337                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8338                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8339                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8340         }
8341
8342         if (reset_phy)
8343                 tg3_phy_reset(tp);
8344
8345         err = tg3_chip_reset(tp);
8346         if (err)
8347                 return err;
8348
8349         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8350
8351         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8352                 val = tr32(TG3_CPMU_CTRL);
8353                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8354                 tw32(TG3_CPMU_CTRL, val);
8355
8356                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8357                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8358                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8359                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8360
8361                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8362                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8363                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8364                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8365
8366                 val = tr32(TG3_CPMU_HST_ACC);
8367                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8368                 val |= CPMU_HST_ACC_MACCLK_6_25;
8369                 tw32(TG3_CPMU_HST_ACC, val);
8370         }
8371
8372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8373                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8374                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8375                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8376                 tw32(PCIE_PWR_MGMT_THRESH, val);
8377
8378                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8379                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8380
8381                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8382
8383                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8384                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8385         }
8386
8387         if (tg3_flag(tp, L1PLLPD_EN)) {
8388                 u32 grc_mode = tr32(GRC_MODE);
8389
8390                 /* Access the lower 1K of PL PCIE block registers. */
8391                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8392                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8393
8394                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8395                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8396                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8397
8398                 tw32(GRC_MODE, grc_mode);
8399         }
8400
8401         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8402                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8403                         u32 grc_mode = tr32(GRC_MODE);
8404
8405                         /* Access the lower 1K of PL PCIE block registers. */
8406                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8407                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8408
8409                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8410                                    TG3_PCIE_PL_LO_PHYCTL5);
8411                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8412                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8413
8414                         tw32(GRC_MODE, grc_mode);
8415                 }
8416
8417                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8418                         u32 grc_mode = tr32(GRC_MODE);
8419
8420                         /* Access the lower 1K of DL PCIE block registers. */
8421                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8422                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8423
8424                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8425                                    TG3_PCIE_DL_LO_FTSMAX);
8426                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8427                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8428                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8429
8430                         tw32(GRC_MODE, grc_mode);
8431                 }
8432
8433                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8434                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8435                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8436                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8437         }
8438
8439         /* This works around an issue with Athlon chipsets on
8440          * B3 tigon3 silicon.  This bit has no effect on any
8441          * other revision.  But do not set this on PCI Express
8442          * chips and don't even touch the clocks if the CPMU is present.
8443          */
8444         if (!tg3_flag(tp, CPMU_PRESENT)) {
8445                 if (!tg3_flag(tp, PCI_EXPRESS))
8446                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8447                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8448         }
8449
8450         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8451             tg3_flag(tp, PCIX_MODE)) {
8452                 val = tr32(TG3PCI_PCISTATE);
8453                 val |= PCISTATE_RETRY_SAME_DMA;
8454                 tw32(TG3PCI_PCISTATE, val);
8455         }
8456
8457         if (tg3_flag(tp, ENABLE_APE)) {
8458                 /* Allow reads and writes to the
8459                  * APE register and memory space.
8460                  */
8461                 val = tr32(TG3PCI_PCISTATE);
8462                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8463                        PCISTATE_ALLOW_APE_SHMEM_WR |
8464                        PCISTATE_ALLOW_APE_PSPACE_WR;
8465                 tw32(TG3PCI_PCISTATE, val);
8466         }
8467
8468         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8469                 /* Enable some hw fixes.  */
8470                 val = tr32(TG3PCI_MSI_DATA);
8471                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8472                 tw32(TG3PCI_MSI_DATA, val);
8473         }
8474
8475         /* Descriptor ring init may make accesses to the
8476          * NIC SRAM area to setup the TX descriptors, so we
8477          * can only do this after the hardware has been
8478          * successfully reset.
8479          */
8480         err = tg3_init_rings(tp);
8481         if (err)
8482                 return err;
8483
8484         if (tg3_flag(tp, 57765_PLUS)) {
8485                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8486                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8487                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8488                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8489                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8490                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8491                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8492                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8493         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8494                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8495                 /* This value is determined during the probe-time DMA
8496                  * engine test, tg3_test_dma().
8497                  */
8498                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8499         }
8500
8501         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8502                           GRC_MODE_4X_NIC_SEND_RINGS |
8503                           GRC_MODE_NO_TX_PHDR_CSUM |
8504                           GRC_MODE_NO_RX_PHDR_CSUM);
8505         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8506
8507         /* Pseudo-header checksum is done by hardware logic and not
8508          * the offload processors, so make the chip do the pseudo-
8509          * header checksums on receive.  For transmit it is more
8510          * convenient to do the pseudo-header checksum in software
8511          * as Linux does that on transmit for us in all cases.
8512          */
8513         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
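
        /* For reference (compiled out): on transmit the stack seeds
         * tcphdr->check with the pseudo-header sum before the skb reaches
         * the driver, roughly as below ("skb" and "len" stand in for the
         * real locals):
         */
#if 0
        tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                                 ip_hdr(skb)->daddr,
                                                 len, IPPROTO_TCP, 0);
#endif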
8514
8515         tw32(GRC_MODE,
8516              tp->grc_mode |
8517              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8518
8519         /* Set up the timer prescaler register.  The clock is always 66MHz. */
8520         val = tr32(GRC_MISC_CFG);
8521         val &= ~0xff;
8522         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8523         tw32(GRC_MISC_CFG, val);
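
        /* A prescaler value of N divides the clock by N + 1, so 65 should
         * yield a 66MHz / 66 = 1MHz timer tick.
         */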
8524
8525         /* Initialize MBUF/DESC pool. */
8526         if (tg3_flag(tp, 5750_PLUS)) {
8527                 /* Do nothing.  */
8528         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8529                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8530                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8531                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8532                 else
8533                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8534                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8535                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8536         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8537                 int fw_len;
8538
8539                 fw_len = tp->fw_len;
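                /* Round up to the next 128-byte (0x80) boundary; e.g. a
                 * 1000-byte firmware image reserves 1024 bytes of SRAM.
                 */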
8540                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8541                 tw32(BUFMGR_MB_POOL_ADDR,
8542                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8543                 tw32(BUFMGR_MB_POOL_SIZE,
8544                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8545         }
8546
8547         if (tp->dev->mtu <= ETH_DATA_LEN) {
8548                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8549                      tp->bufmgr_config.mbuf_read_dma_low_water);
8550                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8551                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8552                 tw32(BUFMGR_MB_HIGH_WATER,
8553                      tp->bufmgr_config.mbuf_high_water);
8554         } else {
8555                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8556                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8557                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8558                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8559                 tw32(BUFMGR_MB_HIGH_WATER,
8560                      tp->bufmgr_config.mbuf_high_water_jumbo);
8561         }
8562         tw32(BUFMGR_DMA_LOW_WATER,
8563              tp->bufmgr_config.dma_low_water);
8564         tw32(BUFMGR_DMA_HIGH_WATER,
8565              tp->bufmgr_config.dma_high_water);
8566
8567         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8569                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8570         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8571             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8572             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8573                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8574         tw32(BUFMGR_MODE, val);
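        /* Poll for up to 2000 * 10us = 20ms for the buffer manager to
         * report itself enabled.
         */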
8575         for (i = 0; i < 2000; i++) {
8576                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8577                         break;
8578                 udelay(10);
8579         }
8580         if (i >= 2000) {
8581                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8582                 return -ENODEV;
8583         }
8584
8585         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8586                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8587
8588         tg3_setup_rxbd_thresholds(tp);
8589
8590         /* Initialize TG3_BDINFO's at:
8591          *  RCVDBDI_STD_BD:     standard eth size rx ring
8592          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8593          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8594          *
8595          * like so:
8596          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8597          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8598          *                              ring attribute flags
8599          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8600          *
8601          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8602          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8603          *
8604          * The size of each ring is fixed in the firmware, but the location is
8605          * configurable.
8606          */
8607         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8608              ((u64) tpr->rx_std_mapping >> 32));
8609         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8610              ((u64) tpr->rx_std_mapping & 0xffffffff));
8611         if (!tg3_flag(tp, 5717_PLUS))
8612                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8613                      NIC_SRAM_RX_BUFFER_DESC);
8614
8615         /* Disable the mini ring */
8616         if (!tg3_flag(tp, 5705_PLUS))
8617                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8618                      BDINFO_FLAGS_DISABLED);
8619
8620         /* Program the jumbo buffer descriptor ring control
8621          * blocks on those devices that have them.
8622          */
8623         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8624             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8625
8626                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8627                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8628                              ((u64) tpr->rx_jmb_mapping >> 32));
8629                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8630                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8631                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8632                               BDINFO_FLAGS_MAXLEN_SHIFT;
8633                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8634                              val | BDINFO_FLAGS_USE_EXT_RECV);
8635                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8636                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8637                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8638                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8639                 } else {
8640                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8641                              BDINFO_FLAGS_DISABLED);
8642                 }
8643
8644                 if (tg3_flag(tp, 57765_PLUS)) {
8645                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8646                                 val = TG3_RX_STD_MAX_SIZE_5700;
8647                         else
8648                                 val = TG3_RX_STD_MAX_SIZE_5717;
8649                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8650                         val |= (TG3_RX_STD_DMA_SZ << 2);
8651                 } else
8652                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8653         } else
8654                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8655
8656         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8657
8658         tpr->rx_std_prod_idx = tp->rx_pending;
8659         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8660
8661         tpr->rx_jmb_prod_idx =
8662                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8663         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8664
8665         tg3_rings_reset(tp);
8666
8667         /* Initialize MAC address and backoff seed. */
8668         __tg3_set_mac_addr(tp, 0);
8669
8670         /* MTU + ethernet header + FCS + optional VLAN tag */
8671         tw32(MAC_RX_MTU_SIZE,
8672              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
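
        /* With the default 1500-byte MTU this programs 1500 + 14 (ETH_HLEN) +
         * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
         */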
8673
8674         /* The slot time is changed by tg3_setup_phy if we
8675          * run at gigabit with half duplex.
8676          */
8677         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8678               (6 << TX_LENGTHS_IPG_SHIFT) |
8679               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8680
8681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8682                 val |= tr32(MAC_TX_LENGTHS) &
8683                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8684                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8685
8686         tw32(MAC_TX_LENGTHS, val);
8687
8688         /* Receive rules. */
8689         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8690         tw32(RCVLPC_CONFIG, 0x0181);
8691
8692         /* Calculate the RDMAC_MODE setting early; we need it to determine
8693          * the RCVLPC_STATS_ENABLE mask.
8694          */
8695         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8696                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8697                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8698                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8699                       RDMAC_MODE_LNGREAD_ENAB);
8700
8701         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8702                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8703
8704         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8705             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8706             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8707                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8708                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8709                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8710
8711         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8712             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8713                 if (tg3_flag(tp, TSO_CAPABLE) &&
8714                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8715                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8716                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8717                            !tg3_flag(tp, IS_5788)) {
8718                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8719                 }
8720         }
8721
8722         if (tg3_flag(tp, PCI_EXPRESS))
8723                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8724
8725         if (tg3_flag(tp, HW_TSO_1) ||
8726             tg3_flag(tp, HW_TSO_2) ||
8727             tg3_flag(tp, HW_TSO_3))
8728                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8729
8730         if (tg3_flag(tp, 57765_PLUS) ||
8731             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8732             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8733                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8734
8735         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8736                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8737
8738         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8739             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8740             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8741             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8742             tg3_flag(tp, 57765_PLUS)) {
8743                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8744                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8745                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8746                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8747                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8748                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8749                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8750                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8751                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8752                 }
8753                 tw32(TG3_RDMA_RSRVCTRL_REG,
8754                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8755         }
8756
8757         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8758             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8759                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8760                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8761                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8762                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8763         }
8764
8765         /* Receive/send statistics. */
8766         if (tg3_flag(tp, 5750_PLUS)) {
8767                 val = tr32(RCVLPC_STATS_ENABLE);
8768                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8769                 tw32(RCVLPC_STATS_ENABLE, val);
8770         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8771                    tg3_flag(tp, TSO_CAPABLE)) {
8772                 val = tr32(RCVLPC_STATS_ENABLE);
8773                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8774                 tw32(RCVLPC_STATS_ENABLE, val);
8775         } else {
8776                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8777         }
8778         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8779         tw32(SNDDATAI_STATSENAB, 0xffffff);
8780         tw32(SNDDATAI_STATSCTRL,
8781              (SNDDATAI_SCTRL_ENABLE |
8782               SNDDATAI_SCTRL_FASTUPD));
8783
8784         /* Set up the host coalescing engine. */
8785         tw32(HOSTCC_MODE, 0);
8786         for (i = 0; i < 2000; i++) {
8787                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8788                         break;
8789                 udelay(10);
8790         }
8791
8792         __tg3_set_coalesce(tp, &tp->coal);
8793
8794         if (!tg3_flag(tp, 5705_PLUS)) {
8795                 /* Status/statistics block address.  See tg3_timer,
8796                  * the tg3_periodic_fetch_stats call there, and
8797                  * tg3_get_stats to see how this works for 5705/5750 chips.
8798                  */
8799                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8800                      ((u64) tp->stats_mapping >> 32));
8801                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8802                      ((u64) tp->stats_mapping & 0xffffffff));
8803                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8804
8805                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8806
8807                 /* Clear statistics and status block memory areas */
8808                 for (i = NIC_SRAM_STATS_BLK;
8809                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8810                      i += sizeof(u32)) {
8811                         tg3_write_mem(tp, i, 0);
8812                         udelay(40);
8813                 }
8814         }
8815
8816         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8817
8818         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8819         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8820         if (!tg3_flag(tp, 5705_PLUS))
8821                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8822
8823         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8824                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8825                 /* reset to prevent losing 1st rx packet intermittently */
8826                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8827                 udelay(10);
8828         }
8829
8830         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8831                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8832                         MAC_MODE_FHDE_ENABLE;
8833         if (tg3_flag(tp, ENABLE_APE))
8834                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8835         if (!tg3_flag(tp, 5705_PLUS) &&
8836             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8837             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8838                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8839         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8840         udelay(40);
8841
8842         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8843          * If TG3_FLAG_IS_NIC is zero, we should read the
8844          * register to preserve the GPIO settings for LOMs. The GPIOs,
8845          * whether used as inputs or outputs, are set by boot code after
8846          * reset.
8847          */
8848         if (!tg3_flag(tp, IS_NIC)) {
8849                 u32 gpio_mask;
8850
8851                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8852                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8853                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8854
8855                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8856                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8857                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8858
8859                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8860                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8861
8862                 tp->grc_local_ctrl &= ~gpio_mask;
8863                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8864
8865                 /* GPIO1 must be driven high for eeprom write protect */
8866                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8867                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8868                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8869         }
8870         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8871         udelay(100);
8872
8873         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8874                 val = tr32(MSGINT_MODE);
8875                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8876                 if (!tg3_flag(tp, 1SHOT_MSI))
8877                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8878                 tw32(MSGINT_MODE, val);
8879         }
8880
8881         if (!tg3_flag(tp, 5705_PLUS)) {
8882                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8883                 udelay(40);
8884         }
8885
8886         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8887                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8888                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8889                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8890                WDMAC_MODE_LNGREAD_ENAB);
8891
8892         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8893             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8894                 if (tg3_flag(tp, TSO_CAPABLE) &&
8895                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8896                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8897                         /* nothing */
8898                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8899                            !tg3_flag(tp, IS_5788)) {
8900                         val |= WDMAC_MODE_RX_ACCEL;
8901                 }
8902         }
8903
8904         /* Enable host coalescing bug fix */
8905         if (tg3_flag(tp, 5755_PLUS))
8906                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8907
8908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8909                 val |= WDMAC_MODE_BURST_ALL_DATA;
8910
8911         tw32_f(WDMAC_MODE, val);
8912         udelay(40);
8913
8914         if (tg3_flag(tp, PCIX_MODE)) {
8915                 u16 pcix_cmd;
8916
8917                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8918                                      &pcix_cmd);
8919                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8920                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8921                         pcix_cmd |= PCI_X_CMD_READ_2K;
8922                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8923                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8924                         pcix_cmd |= PCI_X_CMD_READ_2K;
8925                 }
8926                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8927                                       pcix_cmd);
8928         }
8929
8930         tw32_f(RDMAC_MODE, rdmac_mode);
8931         udelay(40);
8932
8933         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8934             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8935                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
8936                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
8937                                 break;
8938                 }
8939                 if (i < TG3_NUM_RDMA_CHANNELS) {
8940                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8941                         val |= tg3_lso_rd_dma_workaround_bit(tp);
8942                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
8943                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
8944                 }
8945         }
8946
8947         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8948         if (!tg3_flag(tp, 5705_PLUS))
8949                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8950
8951         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8952                 tw32(SNDDATAC_MODE,
8953                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8954         else
8955                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8956
8957         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8958         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8959         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8960         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8961                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8962         tw32(RCVDBDI_MODE, val);
8963         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8964         if (tg3_flag(tp, HW_TSO_1) ||
8965             tg3_flag(tp, HW_TSO_2) ||
8966             tg3_flag(tp, HW_TSO_3))
8967                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8968         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8969         if (tg3_flag(tp, ENABLE_TSS))
8970                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8971         tw32(SNDBDI_MODE, val);
8972         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8973
8974         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8975                 err = tg3_load_5701_a0_firmware_fix(tp);
8976                 if (err)
8977                         return err;
8978         }
8979
8980         if (tg3_flag(tp, TSO_CAPABLE)) {
8981                 err = tg3_load_tso_firmware(tp);
8982                 if (err)
8983                         return err;
8984         }
8985
8986         tp->tx_mode = TX_MODE_ENABLE;
8987
8988         if (tg3_flag(tp, 5755_PLUS) ||
8989             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8990                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8991
8992         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8993                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8994                 tp->tx_mode &= ~val;
8995                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8996         }
8997
8998         tw32_f(MAC_TX_MODE, tp->tx_mode);
8999         udelay(100);
9000
9001         if (tg3_flag(tp, ENABLE_RSS)) {
9002                 int i = 0;
9003                 u32 reg = MAC_RSS_INDIR_TBL_0;
9004
9005                 if (tp->irq_cnt == 2) {
9006                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
9007                                 tw32(reg, 0x0);
9008                                 reg += 4;
9009                         }
9010                 } else {
9011                         u32 val;
9012
9013                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9014                                 val = i % (tp->irq_cnt - 1);
9015                                 i++;
9016                                 for (; i % 8; i++) {
9017                                         val <<= 4;
9018                                         val |= (i % (tp->irq_cnt - 1));
9019                                 }
9020                                 tw32(reg, val);
9021                                 reg += 4;
9022                         }
9023                 }
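
                /* The loop above packs eight 4-bit queue indices into each
                 * 32-bit register, spreading flows round-robin across the rx
                 * queues; e.g. with irq_cnt == 5 (four rx queues) every
                 * register ends up as 0x01230123.
                 */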
9024
9025                 /* Set up the "secret" hash key. */
9026                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9027                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9028                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9029                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9030                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9031                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9032                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9033                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9034                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9035                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9036         }
9037
9038         tp->rx_mode = RX_MODE_ENABLE;
9039         if (tg3_flag(tp, 5755_PLUS))
9040                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9041
9042         if (tg3_flag(tp, ENABLE_RSS))
9043                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9044                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9045                                RX_MODE_RSS_IPV6_HASH_EN |
9046                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9047                                RX_MODE_RSS_IPV4_HASH_EN |
9048                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9049
9050         tw32_f(MAC_RX_MODE, tp->rx_mode);
9051         udelay(10);
9052
9053         tw32(MAC_LED_CTRL, tp->led_ctrl);
9054
9055         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9056         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9057                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9058                 udelay(10);
9059         }
9060         tw32_f(MAC_RX_MODE, tp->rx_mode);
9061         udelay(10);
9062
9063         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9064                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9065                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9066                         /* Set the drive transmission level to 1.2V, but
9067                          * only if the signal pre-emphasis bit is not set. */
9068                         val = tr32(MAC_SERDES_CFG);
9069                         val &= 0xfffff000;
9070                         val |= 0x880;
9071                         tw32(MAC_SERDES_CFG, val);
9072                 }
9073                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9074                         tw32(MAC_SERDES_CFG, 0x616000);
9075         }
9076
9077         /* Prevent chip from dropping frames when flow control
9078          * is enabled.
9079          */
9080         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9081                 val = 1;
9082         else
9083                 val = 2;
9084         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9085
9086         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9087             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9088                 /* Use hardware link auto-negotiation */
9089                 tg3_flag_set(tp, HW_AUTONEG);
9090         }
9091
9092         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9093             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9094                 u32 tmp;
9095
9096                 tmp = tr32(SERDES_RX_CTRL);
9097                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9098                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9099                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9100                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9101         }
9102
9103         if (!tg3_flag(tp, USE_PHYLIB)) {
9104                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9105                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9106                         tp->link_config.speed = tp->link_config.orig_speed;
9107                         tp->link_config.duplex = tp->link_config.orig_duplex;
9108                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9109                 }
9110
9111                 err = tg3_setup_phy(tp, 0);
9112                 if (err)
9113                         return err;
9114
9115                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9116                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9117                         u32 tmp;
9118
9119                         /* Clear CRC stats. */
9120                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9121                                 tg3_writephy(tp, MII_TG3_TEST1,
9122                                              tmp | MII_TG3_TEST1_CRC_EN);
9123                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9124                         }
9125                 }
9126         }
9127
9128         __tg3_set_rx_mode(tp->dev);
9129
9130         /* Initialize receive rules. */
9131         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9132         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9133         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9134         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9135
9136         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9137                 limit = 8;
9138         else
9139                 limit = 16;
9140         if (tg3_flag(tp, ENABLE_ASF))
9141                 limit -= 4;
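        /* Deliberate fall-through: starting at "limit" clears every unused
         * rule register from limit - 1 down to rule 4.
         */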
9142         switch (limit) {
9143         case 16:
9144                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9145         case 15:
9146                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9147         case 14:
9148                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9149         case 13:
9150                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9151         case 12:
9152                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9153         case 11:
9154                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9155         case 10:
9156                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9157         case 9:
9158                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9159         case 8:
9160                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9161         case 7:
9162                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9163         case 6:
9164                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9165         case 5:
9166                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9167         case 4:
9168                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9169         case 3:
9170                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9171         case 2:
9172         case 1:
9173
9174         default:
9175                 break;
9176         }
9177
9178         if (tg3_flag(tp, ENABLE_APE))
9179                 /* Write our heartbeat update interval to APE. */
9180                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9181                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9182
9183         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9184
9185         return 0;
9186 }
9187
9188 /* Called at device open time to get the chip ready for
9189  * packet processing.  Invoked with tp->lock held.
9190  */
9191 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9192 {
9193         /* Chip may have been just powered on. If so, the boot code may still
9194          * be running initialization. Wait for it to finish to avoid races in
9195          * accessing the hardware.
9196          */
9197         tg3_enable_register_access(tp);
9198         tg3_poll_fw(tp);
9199
9200         tg3_switch_clocks(tp);
9201
9202         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9203
9204         return tg3_reset_hw(tp, reset_phy);
9205 }
9206
9207 #define TG3_STAT_ADD32(PSTAT, REG) \
9208 do {    u32 __val = tr32(REG); \
9209         (PSTAT)->low += __val; \
9210         if ((PSTAT)->low < __val) \
9211                 (PSTAT)->high += 1; \
9212 } while (0)
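
/* Illustrative sketch (compiled out): the unsigned-wraparound test in
 * TG3_STAT_ADD32 above is the standard idiom for widening a wrapping
 * 32-bit hardware counter into a 64-bit high/low pair.  The names below
 * are hypothetical and not part of this driver.
 */
#if 0
struct stat64 { u32 high, low; };

static void stat64_add32(struct stat64 *s, u32 delta)
{
        s->low += delta;
        if (s->low < delta)     /* the 32-bit add wrapped */
                s->high += 1;
}
#endif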
9213
9214 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9215 {
9216         struct tg3_hw_stats *sp = tp->hw_stats;
9217
9218         if (!netif_carrier_ok(tp->dev))
9219                 return;
9220
9221         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9222         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9223         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9224         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9225         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9226         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9227         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9228         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9229         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9230         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9231         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9232         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9233         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9234         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
9235                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9236                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9237                 u32 val;
9238
9239                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9240                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
9241                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9242                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
9243         }
9244
9245         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9246         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9247         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9248         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9249         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9250         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9251         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9252         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9253         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9254         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9255         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9256         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9257         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9258         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9259
9260         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9261         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9262             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9263             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9264                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9265         } else {
9266                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9267                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9268                 if (val) {
9269                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9270                         sp->rx_discards.low += val;
9271                         if (sp->rx_discards.low < val)
9272                                 sp->rx_discards.high += 1;
9273                 }
9274                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9275         }
9276         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9277 }
9278
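/* Work around chips that can occasionally drop an MSI: if a vector still
 * has work pending but neither consumer index has moved since the previous
 * timer tick, assume the interrupt was lost and replay the handler.
 */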
9279 static void tg3_chk_missed_msi(struct tg3 *tp)
9280 {
9281         u32 i;
9282
9283         for (i = 0; i < tp->irq_cnt; i++) {
9284                 struct tg3_napi *tnapi = &tp->napi[i];
9285
9286                 if (tg3_has_work(tnapi)) {
9287                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9288                             tnapi->last_tx_cons == tnapi->tx_cons) {
9289                                 if (tnapi->chk_msi_cnt < 1) {
9290                                         tnapi->chk_msi_cnt++;
9291                                         return;
9292                                 }
9293                                 tg3_msi(0, tnapi);
9294                         }
9295                 }
9296                 tnapi->chk_msi_cnt = 0;
9297                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9298                 tnapi->last_tx_cons = tnapi->tx_cons;
9299         }
9300 }
9301
9302 static void tg3_timer(unsigned long __opaque)
9303 {
9304         struct tg3 *tp = (struct tg3 *) __opaque;
9305
9306         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9307                 goto restart_timer;
9308
9309         spin_lock(&tp->lock);
9310
9311         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9312             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9313                 tg3_chk_missed_msi(tp);
9314
9315         if (!tg3_flag(tp, TAGGED_STATUS)) {
9316                 /* All of this garbage is because, when using non-tagged
9317                  * IRQ status, the mailbox/status_block protocol the chip
9318                  * uses with the CPU is race prone.
9319                  */
9320                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9321                         tw32(GRC_LOCAL_CTRL,
9322                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9323                 } else {
9324                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9325                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9326                 }
9327
9328                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9329                         spin_unlock(&tp->lock);
9330                         tg3_reset_task_schedule(tp);
9331                         goto restart_timer;
9332                 }
9333         }
9334
9335         /* This part only runs once per second. */
9336         if (!--tp->timer_counter) {
9337                 if (tg3_flag(tp, 5705_PLUS))
9338                         tg3_periodic_fetch_stats(tp);
9339
9340                 if (tp->setlpicnt && !--tp->setlpicnt)
9341                         tg3_phy_eee_enable(tp);
9342
9343                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9344                         u32 mac_stat;
9345                         int phy_event;
9346
9347                         mac_stat = tr32(MAC_STATUS);
9348
9349                         phy_event = 0;
9350                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9351                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9352                                         phy_event = 1;
9353                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9354                                 phy_event = 1;
9355
9356                         if (phy_event)
9357                                 tg3_setup_phy(tp, 0);
9358                 } else if (tg3_flag(tp, POLL_SERDES)) {
9359                         u32 mac_stat = tr32(MAC_STATUS);
9360                         int need_setup = 0;
9361
9362                         if (netif_carrier_ok(tp->dev) &&
9363                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9364                                 need_setup = 1;
9365                         }
9366                         if (!netif_carrier_ok(tp->dev) &&
9367                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9368                                          MAC_STATUS_SIGNAL_DET))) {
9369                                 need_setup = 1;
9370                         }
9371                         if (need_setup) {
9372                                 if (!tp->serdes_counter) {
9373                                         tw32_f(MAC_MODE,
9374                                              (tp->mac_mode &
9375                                               ~MAC_MODE_PORT_MODE_MASK));
9376                                         udelay(40);
9377                                         tw32_f(MAC_MODE, tp->mac_mode);
9378                                         udelay(40);
9379                                 }
9380                                 tg3_setup_phy(tp, 0);
9381                         }
9382                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9383                            tg3_flag(tp, 5780_CLASS)) {
9384                         tg3_serdes_parallel_detect(tp);
9385                 }
9386
9387                 tp->timer_counter = tp->timer_multiplier;
9388         }
9389
9390         /* Heartbeat is only sent once every 2 seconds.
9391          *
9392          * The heartbeat is to tell the ASF firmware that the host
9393          * driver is still alive.  In the event that the OS crashes,
9394          * ASF needs to reset the hardware to free up the FIFO space
9395          * that may be filled with rx packets destined for the host.
9396          * If the FIFO is full, ASF will no longer function properly.
9397          *
9398          * Unintended resets have been reported on real-time kernels,
9399          * where the timer doesn't run on time.  Netpoll will also have
9400          * the same problem.
9401          *
9402          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9403          * to check the ring condition when the heartbeat is expiring
9404          * before doing the reset.  This will prevent most unintended
9405          * resets.
9406          */
9407         if (!--tp->asf_counter) {
9408                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9409                         tg3_wait_for_event_ack(tp);
9410
9411                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9412                                       FWCMD_NICDRV_ALIVE3);
9413                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9414                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9415                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9416
9417                         tg3_generate_fw_event(tp);
9418                 }
9419                 tp->asf_counter = tp->asf_multiplier;
9420         }
9421
9422         spin_unlock(&tp->lock);
9423
9424 restart_timer:
9425         tp->timer.expires = jiffies + tp->timer_offset;
9426         add_timer(&tp->timer);
9427 }
9428
9429 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9430 {
9431         irq_handler_t fn;
9432         unsigned long flags;
9433         char *name;
9434         struct tg3_napi *tnapi = &tp->napi[irq_num];
9435
9436         if (tp->irq_cnt == 1)
9437                 name = tp->dev->name;
9438         else {
9439                 name = &tnapi->irq_lbl[0];
9440                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
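                /* yields e.g. "eth0-1" for the second vector of eth0 */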
9441                 name[IFNAMSIZ-1] = 0;
9442         }
9443
9444         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9445                 fn = tg3_msi;
9446                 if (tg3_flag(tp, 1SHOT_MSI))
9447                         fn = tg3_msi_1shot;
9448                 flags = 0;
9449         } else {
9450                 fn = tg3_interrupt;
9451                 if (tg3_flag(tp, TAGGED_STATUS))
9452                         fn = tg3_interrupt_tagged;
9453                 flags = IRQF_SHARED;
9454         }
9455
9456         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9457 }
9458
9459 static int tg3_test_interrupt(struct tg3 *tp)
9460 {
9461         struct tg3_napi *tnapi = &tp->napi[0];
9462         struct net_device *dev = tp->dev;
9463         int err, i, intr_ok = 0;
9464         u32 val;
9465
9466         if (!netif_running(dev))
9467                 return -ENODEV;
9468
9469         tg3_disable_ints(tp);
9470
9471         free_irq(tnapi->irq_vec, tnapi);
9472
9473         /*
9474          * Turn off MSI one-shot mode.  Otherwise this test has no way
9475          * to observe whether the interrupt was delivered.
9476          */
9477         if (tg3_flag(tp, 57765_PLUS)) {
9478                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9479                 tw32(MSGINT_MODE, val);
9480         }
9481
9482         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9483                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9484         if (err)
9485                 return err;
9486
9487         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9488         tg3_enable_ints(tp);
9489
9490         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9491                tnapi->coal_now);
9492
9493         for (i = 0; i < 5; i++) {
9494                 u32 int_mbox, misc_host_ctrl;
9495
9496                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9497                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9498
9499                 if ((int_mbox != 0) ||
9500                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9501                         intr_ok = 1;
9502                         break;
9503                 }
9504
9505                 if (tg3_flag(tp, 57765_PLUS) &&
9506                     tnapi->hw_status->status_tag != tnapi->last_tag)
9507                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9508
9509                 msleep(10);
9510         }
9511
9512         tg3_disable_ints(tp);
9513
9514         free_irq(tnapi->irq_vec, tnapi);
9515
9516         err = tg3_request_irq(tp, 0);
9517
9518         if (err)
9519                 return err;
9520
9521         if (intr_ok) {
9522                 /* Re-enable MSI one-shot mode. */
9523                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9524                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9525                         tw32(MSGINT_MODE, val);
9526                 }
9527                 return 0;
9528         }
9529
9530         return -EIO;
9531 }
9532
9533 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
9534  * successfully restored.
9535  */
9536 static int tg3_test_msi(struct tg3 *tp)
9537 {
9538         int err;
9539         u16 pci_cmd;
9540
9541         if (!tg3_flag(tp, USING_MSI))
9542                 return 0;
9543
9544         /* Turn off SERR reporting in case MSI terminates with Master
9545          * Abort.
9546          */
9547         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9548         pci_write_config_word(tp->pdev, PCI_COMMAND,
9549                               pci_cmd & ~PCI_COMMAND_SERR);
9550
9551         err = tg3_test_interrupt(tp);
9552
9553         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9554
9555         if (!err)
9556                 return 0;
9557
9558         /* other failures */
9559         if (err != -EIO)
9560                 return err;
9561
9562         /* MSI test failed, go back to INTx mode */
9563         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9564                     "to INTx mode. Please report this failure to the PCI "
9565                     "maintainer and include system chipset information\n");
9566
9567         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9568
9569         pci_disable_msi(tp->pdev);
9570
9571         tg3_flag_clear(tp, USING_MSI);
9572         tp->napi[0].irq_vec = tp->pdev->irq;
9573
9574         err = tg3_request_irq(tp, 0);
9575         if (err)
9576                 return err;
9577
9578         /* Need to reset the chip because the MSI cycle may have terminated
9579          * with Master Abort.
9580          */
9581         tg3_full_lock(tp, 1);
9582
9583         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9584         err = tg3_init_hw(tp, 1);
9585
9586         tg3_full_unlock(tp);
9587
9588         if (err)
9589                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9590
9591         return err;
9592 }
9593
9594 static int tg3_request_firmware(struct tg3 *tp)
9595 {
9596         const __be32 *fw_data;
9597
9598         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9599                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9600                            tp->fw_needed);
9601                 return -ENOENT;
9602         }
9603
9604         fw_data = (void *)tp->fw->data;
9605
9606         /* The firmware blob starts with version numbers, followed by
9607          * the start address and the _full_ length including BSS sections
9608          * (which must be longer than the actual data, of course).
9609          */
9610
9611         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9612         if (tp->fw_len < (tp->fw->size - 12)) {
9613                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9614                            tp->fw_len, tp->fw_needed);
9615                 release_firmware(tp->fw);
9616                 tp->fw = NULL;
9617                 return -EINVAL;
9618         }
9619
9620         /* We no longer need firmware; we have it. */
9621         tp->fw_needed = NULL;
9622         return 0;
9623 }
9624
9625 static bool tg3_enable_msix(struct tg3 *tp)
9626 {
9627         int i, rc, cpus = num_online_cpus();
9628         struct msix_entry msix_ent[tp->irq_max];
9629
9630         if (cpus == 1)
9631                 /* Just fall back to the simpler MSI mode. */
9632                 return false;
9633
9634         /*
9635          * We want as many rx rings enabled as there are cpus.
9636          * The first MSIX vector only deals with link interrupts, etc,
9637          * so we add one to the number of vectors we are requesting.
9638          */
9639         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
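        /* e.g. on a 4-CPU system with irq_max == 5 this requests all five
         * vectors: one for link/misc plus one per rx queue.
         */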
9640
9641         for (i = 0; i < tp->irq_max; i++) {
9642                 msix_ent[i].entry  = i;
9643                 msix_ent[i].vector = 0;
9644         }
9645
9646         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9647         if (rc < 0) {
9648                 return false;
9649         } else if (rc != 0) {
9650                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9651                         return false;
9652                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9653                               tp->irq_cnt, rc);
9654                 tp->irq_cnt = rc;
9655         }
9656
9657         for (i = 0; i < tp->irq_max; i++)
9658                 tp->napi[i].irq_vec = msix_ent[i].vector;
9659
9660         netif_set_real_num_tx_queues(tp->dev, 1);
9661         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9662         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9663                 pci_disable_msix(tp->pdev);
9664                 return false;
9665         }
9666
9667         if (tp->irq_cnt > 1) {
9668                 tg3_flag_set(tp, ENABLE_RSS);
9669
9670                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9671                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9672                         tg3_flag_set(tp, ENABLE_TSS);
9673                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9674                 }
9675         }
9676
9677         return true;
9678 }
9679
9680 static void tg3_ints_init(struct tg3 *tp)
9681 {
9682         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9683             !tg3_flag(tp, TAGGED_STATUS)) {
9684                 /* All MSI supporting chips should support tagged
9685                  * status.  Warn and fall back to INTx if this chip does not.
9686                  */
9687                 netdev_warn(tp->dev,
9688                             "MSI without TAGGED_STATUS? Not using MSI\n");
9689                 goto defcfg;
9690         }
9691
9692         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9693                 tg3_flag_set(tp, USING_MSIX);
9694         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9695                 tg3_flag_set(tp, USING_MSI);
9696
9697         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9698                 u32 msi_mode = tr32(MSGINT_MODE);
9699                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9700                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9701                 if (!tg3_flag(tp, 1SHOT_MSI))
9702                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9703                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9704         }
9705 defcfg:
9706         if (!tg3_flag(tp, USING_MSIX)) {
9707                 tp->irq_cnt = 1;
9708                 tp->napi[0].irq_vec = tp->pdev->irq;
9709                 netif_set_real_num_tx_queues(tp->dev, 1);
9710                 netif_set_real_num_rx_queues(tp->dev, 1);
9711         }
9712 }
9713
9714 static void tg3_ints_fini(struct tg3 *tp)
9715 {
9716         if (tg3_flag(tp, USING_MSIX))
9717                 pci_disable_msix(tp->pdev);
9718         else if (tg3_flag(tp, USING_MSI))
9719                 pci_disable_msi(tp->pdev);
9720         tg3_flag_clear(tp, USING_MSI);
9721         tg3_flag_clear(tp, USING_MSIX);
9722         tg3_flag_clear(tp, ENABLE_RSS);
9723         tg3_flag_clear(tp, ENABLE_TSS);
9724 }
9725
9726 static int tg3_open(struct net_device *dev)
9727 {
9728         struct tg3 *tp = netdev_priv(dev);
9729         int i, err;
9730
9731         if (tp->fw_needed) {
9732                 err = tg3_request_firmware(tp);
9733                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9734                         if (err)
9735                                 return err;
9736                 } else if (err) {
9737                         netdev_warn(tp->dev, "TSO capability disabled\n");
9738                         tg3_flag_clear(tp, TSO_CAPABLE);
9739                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9740                         netdev_notice(tp->dev, "TSO capability restored\n");
9741                         tg3_flag_set(tp, TSO_CAPABLE);
9742                 }
9743         }
9744
9745         netif_carrier_off(tp->dev);
9746
9747         err = tg3_power_up(tp);
9748         if (err)
9749                 return err;
9750
9751         tg3_full_lock(tp, 0);
9752
9753         tg3_disable_ints(tp);
9754         tg3_flag_clear(tp, INIT_COMPLETE);
9755
9756         tg3_full_unlock(tp);
9757
9758         /*
9759          * Set up interrupts first so we know how
9760          * many NAPI resources to allocate.
9761          */
9762         tg3_ints_init(tp);
9763
9764         /* The placement of this call is tied
9765          * to the setup and use of Host TX descriptors.
9766          */
9767         err = tg3_alloc_consistent(tp);
9768         if (err)
9769                 goto err_out1;
9770
9771         tg3_napi_init(tp);
9772
9773         tg3_napi_enable(tp);
9774
9775         for (i = 0; i < tp->irq_cnt; i++) {
9776                 struct tg3_napi *tnapi = &tp->napi[i];
9777                 err = tg3_request_irq(tp, i);
9778                 if (err) {
9779                         for (i--; i >= 0; i--) {
9780                                 tnapi = &tp->napi[i];
9781                                 free_irq(tnapi->irq_vec, tnapi);
9782                         }
9783                         goto err_out2;
9784                 }
9785         }
9786
9787         tg3_full_lock(tp, 0);
9788
9789         err = tg3_init_hw(tp, 1);
9790         if (err) {
9791                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9792                 tg3_free_rings(tp);
9793         } else {
9794                 if (tg3_flag(tp, TAGGED_STATUS) &&
9795                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9796                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9797                         tp->timer_offset = HZ;
9798                 else
9799                         tp->timer_offset = HZ / 10;
9800
9801                 BUG_ON(tp->timer_offset > HZ);
9802                 tp->timer_counter = tp->timer_multiplier =
9803                         (HZ / tp->timer_offset);
9804                 tp->asf_counter = tp->asf_multiplier =
9805                         ((HZ / tp->timer_offset) * 2);
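                /* E.g. with tagged status, timer_offset = HZ so the timer
                 * fires once a second and timer_counter = 1; untagged,
                 * timer_offset = HZ/10 gives ten firings per second with
                 * timer_counter = 10.  Either way the ASF heartbeat works
                 * out to once every two seconds.
                 */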
9806
9807                 init_timer(&tp->timer);
9808                 tp->timer.expires = jiffies + tp->timer_offset;
9809                 tp->timer.data = (unsigned long) tp;
9810                 tp->timer.function = tg3_timer;
9811         }
9812
9813         tg3_full_unlock(tp);
9814
9815         if (err)
9816                 goto err_out3;
9817
9818         if (tg3_flag(tp, USING_MSI)) {
9819                 err = tg3_test_msi(tp);
9820
9821                 if (err) {
9822                         tg3_full_lock(tp, 0);
9823                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9824                         tg3_free_rings(tp);
9825                         tg3_full_unlock(tp);
9826
9827                         goto err_out2;
9828                 }
9829
9830                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9831                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9832
9833                         tw32(PCIE_TRANSACTION_CFG,
9834                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9835                 }
9836         }
9837
9838         tg3_phy_start(tp);
9839
9840         tg3_full_lock(tp, 0);
9841
9842         add_timer(&tp->timer);
9843         tg3_flag_set(tp, INIT_COMPLETE);
9844         tg3_enable_ints(tp);
9845
9846         tg3_full_unlock(tp);
9847
9848         netif_tx_start_all_queues(dev);
9849
9850         /*
9851          * Reset the loopback feature if it was turned on while the device
9852          * was down; make sure that it's installed properly now.
9853          */
9854         if (dev->features & NETIF_F_LOOPBACK)
9855                 tg3_set_loopback(dev, dev->features);
9856
9857         return 0;
9858
9859 err_out3:
9860         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9861                 struct tg3_napi *tnapi = &tp->napi[i];
9862                 free_irq(tnapi->irq_vec, tnapi);
9863         }
9864
9865 err_out2:
9866         tg3_napi_disable(tp);
9867         tg3_napi_fini(tp);
9868         tg3_free_consistent(tp);
9869
9870 err_out1:
9871         tg3_ints_fini(tp);
9872         tg3_frob_aux_power(tp, false);
9873         pci_set_power_state(tp->pdev, PCI_D3hot);
9874         return err;
9875 }
9876
9877 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9878                                                  struct rtnl_link_stats64 *);
9879 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9880
9881 static int tg3_close(struct net_device *dev)
9882 {
9883         int i;
9884         struct tg3 *tp = netdev_priv(dev);
9885
9886         tg3_napi_disable(tp);
9887         tg3_reset_task_cancel(tp);
9888
9889         netif_tx_stop_all_queues(dev);
9890
9891         del_timer_sync(&tp->timer);
9892
9893         tg3_phy_stop(tp);
9894
9895         tg3_full_lock(tp, 1);
9896
9897         tg3_disable_ints(tp);
9898
9899         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9900         tg3_free_rings(tp);
9901         tg3_flag_clear(tp, INIT_COMPLETE);
9902
9903         tg3_full_unlock(tp);
9904
9905         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9906                 struct tg3_napi *tnapi = &tp->napi[i];
9907                 free_irq(tnapi->irq_vec, tnapi);
9908         }
9909
9910         tg3_ints_fini(tp);
9911
9912         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9913
9914         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9915                sizeof(tp->estats_prev));
9916
9917         tg3_napi_fini(tp);
9918
9919         tg3_free_consistent(tp);
9920
9921         tg3_power_down(tp);
9922
9923         netif_carrier_off(tp->dev);
9924
9925         return 0;
9926 }
9927
9928 static inline u64 get_stat64(tg3_stat64_t *val)
9929 {
9930         return ((u64)val->high << 32) | ((u64)val->low);
9931 }
9932
9933 static u64 calc_crc_errors(struct tg3 *tp)
9934 {
9935         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9936
9937         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9938             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9939              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9940                 u32 val;
9941
9942                 spin_lock_bh(&tp->lock);
9943                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9944                         tg3_writephy(tp, MII_TG3_TEST1,
9945                                      val | MII_TG3_TEST1_CRC_EN);
9946                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9947                 } else
9948                         val = 0;
9949                 spin_unlock_bh(&tp->lock);
9950
9951                 tp->phy_crc_errors += val;
9952
9953                 return tp->phy_crc_errors;
9954         }
9955
9956         return get_stat64(&hw_stats->rx_fcs_errors);
9957 }
9958
9959 #define ESTAT_ADD(member) \
9960         estats->member =        old_estats->member + \
9961                                 get_stat64(&hw_stats->member)
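
/* For reference, ESTAT_ADD(rx_octets) expands (at its call sites below) to
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 */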
9962
9963 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9964 {
9965         struct tg3_ethtool_stats *estats = &tp->estats;
9966         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9967         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9968
9969         if (!hw_stats)
9970                 return old_estats;
9971
9972         ESTAT_ADD(rx_octets);
9973         ESTAT_ADD(rx_fragments);
9974         ESTAT_ADD(rx_ucast_packets);
9975         ESTAT_ADD(rx_mcast_packets);
9976         ESTAT_ADD(rx_bcast_packets);
9977         ESTAT_ADD(rx_fcs_errors);
9978         ESTAT_ADD(rx_align_errors);
9979         ESTAT_ADD(rx_xon_pause_rcvd);
9980         ESTAT_ADD(rx_xoff_pause_rcvd);
9981         ESTAT_ADD(rx_mac_ctrl_rcvd);
9982         ESTAT_ADD(rx_xoff_entered);
9983         ESTAT_ADD(rx_frame_too_long_errors);
9984         ESTAT_ADD(rx_jabbers);
9985         ESTAT_ADD(rx_undersize_packets);
9986         ESTAT_ADD(rx_in_length_errors);
9987         ESTAT_ADD(rx_out_length_errors);
9988         ESTAT_ADD(rx_64_or_less_octet_packets);
9989         ESTAT_ADD(rx_65_to_127_octet_packets);
9990         ESTAT_ADD(rx_128_to_255_octet_packets);
9991         ESTAT_ADD(rx_256_to_511_octet_packets);
9992         ESTAT_ADD(rx_512_to_1023_octet_packets);
9993         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9994         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9995         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9996         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9997         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9998
9999         ESTAT_ADD(tx_octets);
10000         ESTAT_ADD(tx_collisions);
10001         ESTAT_ADD(tx_xon_sent);
10002         ESTAT_ADD(tx_xoff_sent);
10003         ESTAT_ADD(tx_flow_control);
10004         ESTAT_ADD(tx_mac_errors);
10005         ESTAT_ADD(tx_single_collisions);
10006         ESTAT_ADD(tx_mult_collisions);
10007         ESTAT_ADD(tx_deferred);
10008         ESTAT_ADD(tx_excessive_collisions);
10009         ESTAT_ADD(tx_late_collisions);
10010         ESTAT_ADD(tx_collide_2times);
10011         ESTAT_ADD(tx_collide_3times);
10012         ESTAT_ADD(tx_collide_4times);
10013         ESTAT_ADD(tx_collide_5times);
10014         ESTAT_ADD(tx_collide_6times);
10015         ESTAT_ADD(tx_collide_7times);
10016         ESTAT_ADD(tx_collide_8times);
10017         ESTAT_ADD(tx_collide_9times);
10018         ESTAT_ADD(tx_collide_10times);
10019         ESTAT_ADD(tx_collide_11times);
10020         ESTAT_ADD(tx_collide_12times);
10021         ESTAT_ADD(tx_collide_13times);
10022         ESTAT_ADD(tx_collide_14times);
10023         ESTAT_ADD(tx_collide_15times);
10024         ESTAT_ADD(tx_ucast_packets);
10025         ESTAT_ADD(tx_mcast_packets);
10026         ESTAT_ADD(tx_bcast_packets);
10027         ESTAT_ADD(tx_carrier_sense_errors);
10028         ESTAT_ADD(tx_discards);
10029         ESTAT_ADD(tx_errors);
10030
10031         ESTAT_ADD(dma_writeq_full);
10032         ESTAT_ADD(dma_write_prioq_full);
10033         ESTAT_ADD(rxbds_empty);
10034         ESTAT_ADD(rx_discards);
10035         ESTAT_ADD(rx_errors);
10036         ESTAT_ADD(rx_threshold_hit);
10037
10038         ESTAT_ADD(dma_readq_full);
10039         ESTAT_ADD(dma_read_prioq_full);
10040         ESTAT_ADD(tx_comp_queue_full);
10041
10042         ESTAT_ADD(ring_set_send_prod_index);
10043         ESTAT_ADD(ring_status_update);
10044         ESTAT_ADD(nic_irqs);
10045         ESTAT_ADD(nic_avoided_irqs);
10046         ESTAT_ADD(nic_tx_threshold_hit);
10047
10048         ESTAT_ADD(mbuf_lwm_thresh_hit);
10049
10050         return estats;
10051 }
10052
10053 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
10054                                                  struct rtnl_link_stats64 *stats)
10055 {
10056         struct tg3 *tp = netdev_priv(dev);
10057         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10058         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10059
10060         if (!hw_stats)
10061                 return old_stats;
10062
10063         stats->rx_packets = old_stats->rx_packets +
10064                 get_stat64(&hw_stats->rx_ucast_packets) +
10065                 get_stat64(&hw_stats->rx_mcast_packets) +
10066                 get_stat64(&hw_stats->rx_bcast_packets);
10067
10068         stats->tx_packets = old_stats->tx_packets +
10069                 get_stat64(&hw_stats->tx_ucast_packets) +
10070                 get_stat64(&hw_stats->tx_mcast_packets) +
10071                 get_stat64(&hw_stats->tx_bcast_packets);
10072
10073         stats->rx_bytes = old_stats->rx_bytes +
10074                 get_stat64(&hw_stats->rx_octets);
10075         stats->tx_bytes = old_stats->tx_bytes +
10076                 get_stat64(&hw_stats->tx_octets);
10077
10078         stats->rx_errors = old_stats->rx_errors +
10079                 get_stat64(&hw_stats->rx_errors);
10080         stats->tx_errors = old_stats->tx_errors +
10081                 get_stat64(&hw_stats->tx_errors) +
10082                 get_stat64(&hw_stats->tx_mac_errors) +
10083                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10084                 get_stat64(&hw_stats->tx_discards);
10085
10086         stats->multicast = old_stats->multicast +
10087                 get_stat64(&hw_stats->rx_mcast_packets);
10088         stats->collisions = old_stats->collisions +
10089                 get_stat64(&hw_stats->tx_collisions);
10090
10091         stats->rx_length_errors = old_stats->rx_length_errors +
10092                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10093                 get_stat64(&hw_stats->rx_undersize_packets);
10094
10095         stats->rx_over_errors = old_stats->rx_over_errors +
10096                 get_stat64(&hw_stats->rxbds_empty);
10097         stats->rx_frame_errors = old_stats->rx_frame_errors +
10098                 get_stat64(&hw_stats->rx_align_errors);
10099         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10100                 get_stat64(&hw_stats->tx_discards);
10101         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10102                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10103
10104         stats->rx_crc_errors = old_stats->rx_crc_errors +
10105                 calc_crc_errors(tp);
10106
10107         stats->rx_missed_errors = old_stats->rx_missed_errors +
10108                 get_stat64(&hw_stats->rx_discards);
10109
10110         stats->rx_dropped = tp->rx_dropped;
10111         stats->tx_dropped = tp->tx_dropped;
10112
10113         return stats;
10114 }
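
/* The pattern above lets the counters survive chip resets: tg3_close()
 * snapshots the running totals into net_stats_prev, the hardware counters
 * restart from zero, and every readout re-adds the snapshot.  Condensed
 * into a hypothetical helper:
 */
static inline u64 tg3_stat_total_sketch(u64 prev, tg3_stat64_t *hw)
{
        return prev + get_stat64(hw);
}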
10115
10116 static inline u32 calc_crc(unsigned char *buf, int len)
10117 {
10118         u32 reg;
10119         u32 tmp;
10120         int j, k;
10121
10122         reg = 0xffffffff;
10123
10124         for (j = 0; j < len; j++) {
10125                 reg ^= buf[j];
10126
10127                 for (k = 0; k < 8; k++) {
10128                         tmp = reg & 0x01;
10129
10130                         reg >>= 1;
10131
10132                         if (tmp)
10133                                 reg ^= 0xedb88320;
10134                 }
10135         }
10136
10137         return ~reg;
10138 }
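
/* calc_crc() above is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320, the Ethernet FCS polynomial) with a final inversion.  A
 * sketch of the equivalence via the kernel's crc32_le(); illustrative
 * only, and it would additionally need <linux/crc32.h>:
 */
static inline u32 tg3_calc_crc_sketch(unsigned char *buf, int len)
{
        /* crc32_le() takes the seed from the caller and skips the final
         * inversion that calc_crc() applies, hence the ~0 and the ~.
         */
        return ~crc32_le(~0, buf, len);
}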
10139
10140 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10141 {
10142         /* accept or reject all multicast frames */
10143         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10144         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10145         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10146         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10147 }
10148
10149 static void __tg3_set_rx_mode(struct net_device *dev)
10150 {
10151         struct tg3 *tp = netdev_priv(dev);
10152         u32 rx_mode;
10153
10154         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10155                                   RX_MODE_KEEP_VLAN_TAG);
10156
10157 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10158         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10159          * flag clear.
10160          */
10161         if (!tg3_flag(tp, ENABLE_ASF))
10162                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10163 #endif
10164
10165         if (dev->flags & IFF_PROMISC) {
10166                 /* Promiscuous mode. */
10167                 rx_mode |= RX_MODE_PROMISC;
10168         } else if (dev->flags & IFF_ALLMULTI) {
10169                 /* Accept all multicast. */
10170                 tg3_set_multi(tp, 1);
10171         } else if (netdev_mc_empty(dev)) {
10172                 /* Reject all multicast. */
10173                 tg3_set_multi(tp, 0);
10174         } else {
10175                 /* Accept one or more multicast(s). */
10176                 struct netdev_hw_addr *ha;
10177                 u32 mc_filter[4] = { 0, };
10178                 u32 regidx;
10179                 u32 bit;
10180                 u32 crc;
10181
10182                 netdev_for_each_mc_addr(ha, dev) {
10183                         crc = calc_crc(ha->addr, ETH_ALEN);
10184                         bit = ~crc & 0x7f;
10185                         regidx = (bit & 0x60) >> 5;
10186                         bit &= 0x1f;
10187                         mc_filter[regidx] |= (1 << bit);
10188                 }
10189
10190                 tw32(MAC_HASH_REG_0, mc_filter[0]);
10191                 tw32(MAC_HASH_REG_1, mc_filter[1]);
10192                 tw32(MAC_HASH_REG_2, mc_filter[2]);
10193                 tw32(MAC_HASH_REG_3, mc_filter[3]);
10194         }
10195
10196         if (rx_mode != tp->rx_mode) {
10197                 tp->rx_mode = rx_mode;
10198                 tw32_f(MAC_RX_MODE, rx_mode);
10199                 udelay(10);
10200         }
10201 }
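
/* Sketch of the hash-slot math in __tg3_set_rx_mode() above (the helper
 * name is made up).  E.g. if ~crc & 0x7f == 0x6b, the address lands in
 * MAC_HASH_REG_3 (0x6b >> 5 == 3), bit 11 (0x6b & 0x1f):
 */
static inline void tg3_hash_slot_sketch(u32 crc, u32 *regidx, u32 *bitpos)
{
        u32 bit = ~crc & 0x7f;          /* low 7 bits of the inverted CRC */

        *regidx = (bit & 0x60) >> 5;    /* which of MAC_HASH_REG_0..3     */
        *bitpos = bit & 0x1f;           /* which bit within that register */
}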
10202
10203 static void tg3_set_rx_mode(struct net_device *dev)
10204 {
10205         struct tg3 *tp = netdev_priv(dev);
10206
10207         if (!netif_running(dev))
10208                 return;
10209
10210         tg3_full_lock(tp, 0);
10211         __tg3_set_rx_mode(dev);
10212         tg3_full_unlock(tp);
10213 }
10214
10215 static int tg3_get_regs_len(struct net_device *dev)
10216 {
10217         return TG3_REG_BLK_SIZE;
10218 }
10219
10220 static void tg3_get_regs(struct net_device *dev,
10221                 struct ethtool_regs *regs, void *_p)
10222 {
10223         struct tg3 *tp = netdev_priv(dev);
10224
10225         regs->version = 0;
10226
10227         memset(_p, 0, TG3_REG_BLK_SIZE);
10228
10229         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10230                 return;
10231
10232         tg3_full_lock(tp, 0);
10233
10234         tg3_dump_legacy_regs(tp, (u32 *)_p);
10235
10236         tg3_full_unlock(tp);
10237 }
10238
10239 static int tg3_get_eeprom_len(struct net_device *dev)
10240 {
10241         struct tg3 *tp = netdev_priv(dev);
10242
10243         return tp->nvram_size;
10244 }
10245
10246 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10247 {
10248         struct tg3 *tp = netdev_priv(dev);
10249         int ret;
10250         u8  *pd;
10251         u32 i, offset, len, b_offset, b_count;
10252         __be32 val;
10253
10254         if (tg3_flag(tp, NO_NVRAM))
10255                 return -EINVAL;
10256
10257         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10258                 return -EAGAIN;
10259
10260         offset = eeprom->offset;
10261         len = eeprom->len;
10262         eeprom->len = 0;
10263
10264         eeprom->magic = TG3_EEPROM_MAGIC;
10265
10266         if (offset & 3) {
10267                 /* adjustments to start on required 4 byte boundary */
10268                 b_offset = offset & 3;
10269                 b_count = 4 - b_offset;
10270                 if (b_count > len) {
10271                         /* i.e. offset=1 len=2 */
10272                         b_count = len;
10273                 }
10274                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10275                 if (ret)
10276                         return ret;
10277                 memcpy(data, ((char *)&val) + b_offset, b_count);
10278                 len -= b_count;
10279                 offset += b_count;
10280                 eeprom->len += b_count;
10281         }
10282
10283         /* read bytes up to the last 4 byte boundary */
10284         pd = &data[eeprom->len];
10285         for (i = 0; i < (len - (len & 3)); i += 4) {
10286                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10287                 if (ret) {
10288                         eeprom->len += i;
10289                         return ret;
10290                 }
10291                 memcpy(pd + i, &val, 4);
10292         }
10293         eeprom->len += i;
10294
10295         if (len & 3) {
10296                 /* read last bytes not ending on 4 byte boundary */
10297                 pd = &data[eeprom->len];
10298                 b_count = len & 3;
10299                 b_offset = offset + len - b_count;
10300                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10301                 if (ret)
10302                         return ret;
10303                 memcpy(pd, &val, b_count);
10304                 eeprom->len += b_count;
10305         }
10306         return 0;
10307 }
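
/* Worked example for the unaligned-head handling above (the values are
 * hypothetical): offset = 1, len = 2 gives b_offset = 1 and
 * b_count = 4 - 1 = 3, clamped to len = 2, so a single aligned read at
 * offset 0 supplies both requested bytes and the later loops do nothing.
 * The clamp as a made-up helper:
 */
static inline u32 tg3_head_count_sketch(u32 offset, u32 len)
{
        u32 b_count = 4 - (offset & 3);

        return b_count > len ? len : b_count;
}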
10308
10309 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10310
10311 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10312 {
10313         struct tg3 *tp = netdev_priv(dev);
10314         int ret;
10315         u32 offset, len, b_offset, odd_len;
10316         u8 *buf;
10317         __be32 start, end;
10318
10319         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10320                 return -EAGAIN;
10321
10322         if (tg3_flag(tp, NO_NVRAM) ||
10323             eeprom->magic != TG3_EEPROM_MAGIC)
10324                 return -EINVAL;
10325
10326         offset = eeprom->offset;
10327         len = eeprom->len;
10328
10329         if ((b_offset = (offset & 3))) {
10330                 /* adjustments to start on required 4 byte boundary */
10331                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10332                 if (ret)
10333                         return ret;
10334                 len += b_offset;
10335                 offset &= ~3;
10336                 if (len < 4)
10337                         len = 4;
10338         }
10339
10340         odd_len = 0;
10341         if (len & 3) {
10342                 /* adjustments to end on required 4 byte boundary */
10343                 odd_len = 1;
10344                 len = (len + 3) & ~3;
10345                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10346                 if (ret)
10347                         return ret;
10348         }
10349
10350         buf = data;
10351         if (b_offset || odd_len) {
10352                 buf = kmalloc(len, GFP_KERNEL);
10353                 if (!buf)
10354                         return -ENOMEM;
10355                 if (b_offset)
10356                         memcpy(buf, &start, 4);
10357                 if (odd_len)
10358                         memcpy(buf+len-4, &end, 4);
10359                 memcpy(buf + b_offset, data, eeprom->len);
10360         }
10361
10362         ret = tg3_nvram_write_block(tp, offset, len, buf);
10363
10364         if (buf != data)
10365                 kfree(buf);
10366
10367         return ret;
10368 }
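
/* Condensed sketch of the read-modify-write widening above: an unaligned
 * write of [offset, offset + len) is grown to the enclosing 4-byte-aligned
 * window, after reading back the first and last words so the untouched
 * bytes survive.  The helper and its name are hypothetical:
 */
static inline void tg3_widen_window_sketch(u32 *offset, u32 *len)
{
        *len += *offset & 3;            /* pull the start back...          */
        *offset &= ~3u;                 /* ...onto a 4-byte boundary       */
        *len = (*len + 3) & ~3u;        /* round the end up to a full word */
}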
10369
10370 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10371 {
10372         struct tg3 *tp = netdev_priv(dev);
10373
10374         if (tg3_flag(tp, USE_PHYLIB)) {
10375                 struct phy_device *phydev;
10376                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10377                         return -EAGAIN;
10378                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10379                 return phy_ethtool_gset(phydev, cmd);
10380         }
10381
10382         cmd->supported = (SUPPORTED_Autoneg);
10383
10384         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10385                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10386                                    SUPPORTED_1000baseT_Full);
10387
10388         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10389                 cmd->supported |= (SUPPORTED_100baseT_Half |
10390                                    SUPPORTED_100baseT_Full |
10391                                    SUPPORTED_10baseT_Half |
10392                                    SUPPORTED_10baseT_Full |
10393                                    SUPPORTED_TP);
10394                 cmd->port = PORT_TP;
10395         } else {
10396                 cmd->supported |= SUPPORTED_FIBRE;
10397                 cmd->port = PORT_FIBRE;
10398         }
10399
10400         cmd->advertising = tp->link_config.advertising;
10401         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10402                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10403                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10404                                 cmd->advertising |= ADVERTISED_Pause;
10405                         } else {
10406                                 cmd->advertising |= ADVERTISED_Pause |
10407                                                     ADVERTISED_Asym_Pause;
10408                         }
10409                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10410                         cmd->advertising |= ADVERTISED_Asym_Pause;
10411                 }
10412         }
10413         if (netif_running(dev)) {
10414                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10415                 cmd->duplex = tp->link_config.active_duplex;
10416         } else {
10417                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10418                 cmd->duplex = DUPLEX_INVALID;
10419         }
10420         cmd->phy_address = tp->phy_addr;
10421         cmd->transceiver = XCVR_INTERNAL;
10422         cmd->autoneg = tp->link_config.autoneg;
10423         cmd->maxtxpkt = 0;
10424         cmd->maxrxpkt = 0;
10425         return 0;
10426 }
10427
10428 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10429 {
10430         struct tg3 *tp = netdev_priv(dev);
10431         u32 speed = ethtool_cmd_speed(cmd);
10432
10433         if (tg3_flag(tp, USE_PHYLIB)) {
10434                 struct phy_device *phydev;
10435                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10436                         return -EAGAIN;
10437                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10438                 return phy_ethtool_sset(phydev, cmd);
10439         }
10440
10441         if (cmd->autoneg != AUTONEG_ENABLE &&
10442             cmd->autoneg != AUTONEG_DISABLE)
10443                 return -EINVAL;
10444
10445         if (cmd->autoneg == AUTONEG_DISABLE &&
10446             cmd->duplex != DUPLEX_FULL &&
10447             cmd->duplex != DUPLEX_HALF)
10448                 return -EINVAL;
10449
10450         if (cmd->autoneg == AUTONEG_ENABLE) {
10451                 u32 mask = ADVERTISED_Autoneg |
10452                            ADVERTISED_Pause |
10453                            ADVERTISED_Asym_Pause;
10454
10455                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10456                         mask |= ADVERTISED_1000baseT_Half |
10457                                 ADVERTISED_1000baseT_Full;
10458
10459                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10460                         mask |= ADVERTISED_100baseT_Half |
10461                                 ADVERTISED_100baseT_Full |
10462                                 ADVERTISED_10baseT_Half |
10463                                 ADVERTISED_10baseT_Full |
10464                                 ADVERTISED_TP;
10465                 else
10466                         mask |= ADVERTISED_FIBRE;
10467
10468                 if (cmd->advertising & ~mask)
10469                         return -EINVAL;
10470
10471                 mask &= (ADVERTISED_1000baseT_Half |
10472                          ADVERTISED_1000baseT_Full |
10473                          ADVERTISED_100baseT_Half |
10474                          ADVERTISED_100baseT_Full |
10475                          ADVERTISED_10baseT_Half |
10476                          ADVERTISED_10baseT_Full);
10477
10478                 cmd->advertising &= mask;
10479         } else {
10480                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10481                         if (speed != SPEED_1000)
10482                                 return -EINVAL;
10483
10484                         if (cmd->duplex != DUPLEX_FULL)
10485                                 return -EINVAL;
10486                 } else {
10487                         if (speed != SPEED_100 &&
10488                             speed != SPEED_10)
10489                                 return -EINVAL;
10490                 }
10491         }
10492
10493         tg3_full_lock(tp, 0);
10494
10495         tp->link_config.autoneg = cmd->autoneg;
10496         if (cmd->autoneg == AUTONEG_ENABLE) {
10497                 tp->link_config.advertising = (cmd->advertising |
10498                                               ADVERTISED_Autoneg);
10499                 tp->link_config.speed = SPEED_INVALID;
10500                 tp->link_config.duplex = DUPLEX_INVALID;
10501         } else {
10502                 tp->link_config.advertising = 0;
10503                 tp->link_config.speed = speed;
10504                 tp->link_config.duplex = cmd->duplex;
10505         }
10506
10507         tp->link_config.orig_speed = tp->link_config.speed;
10508         tp->link_config.orig_duplex = tp->link_config.duplex;
10509         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10510
10511         if (netif_running(dev))
10512                 tg3_setup_phy(tp, 1);
10513
10514         tg3_full_unlock(tp);
10515
10516         return 0;
10517 }
10518
10519 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10520 {
10521         struct tg3 *tp = netdev_priv(dev);
10522
10523         strcpy(info->driver, DRV_MODULE_NAME);
10524         strcpy(info->version, DRV_MODULE_VERSION);
10525         strcpy(info->fw_version, tp->fw_ver);
10526         strcpy(info->bus_info, pci_name(tp->pdev));
10527 }
10528
10529 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10530 {
10531         struct tg3 *tp = netdev_priv(dev);
10532
10533         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10534                 wol->supported = WAKE_MAGIC;
10535         else
10536                 wol->supported = 0;
10537         wol->wolopts = 0;
10538         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10539                 wol->wolopts = WAKE_MAGIC;
10540         memset(&wol->sopass, 0, sizeof(wol->sopass));
10541 }
10542
10543 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10544 {
10545         struct tg3 *tp = netdev_priv(dev);
10546         struct device *dp = &tp->pdev->dev;
10547
10548         if (wol->wolopts & ~WAKE_MAGIC)
10549                 return -EINVAL;
10550         if ((wol->wolopts & WAKE_MAGIC) &&
10551             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10552                 return -EINVAL;
10553
10554         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10555
10556         spin_lock_bh(&tp->lock);
10557         if (device_may_wakeup(dp))
10558                 tg3_flag_set(tp, WOL_ENABLE);
10559         else
10560                 tg3_flag_clear(tp, WOL_ENABLE);
10561         spin_unlock_bh(&tp->lock);
10562
10563         return 0;
10564 }
10565
10566 static u32 tg3_get_msglevel(struct net_device *dev)
10567 {
10568         struct tg3 *tp = netdev_priv(dev);
10569         return tp->msg_enable;
10570 }
10571
10572 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10573 {
10574         struct tg3 *tp = netdev_priv(dev);
10575         tp->msg_enable = value;
10576 }
10577
10578 static int tg3_nway_reset(struct net_device *dev)
10579 {
10580         struct tg3 *tp = netdev_priv(dev);
10581         int r;
10582
10583         if (!netif_running(dev))
10584                 return -EAGAIN;
10585
10586         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10587                 return -EINVAL;
10588
10589         if (tg3_flag(tp, USE_PHYLIB)) {
10590                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10591                         return -EAGAIN;
10592                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10593         } else {
10594                 u32 bmcr;
10595
10596                 spin_lock_bh(&tp->lock);
10597                 r = -EINVAL;
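                /* The result of the first BMCR read is discarded; it is
                 * apparently there to flush a stale value so that the
                 * checked read below sees current state.
                 */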
10598                 tg3_readphy(tp, MII_BMCR, &bmcr);
10599                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10600                     ((bmcr & BMCR_ANENABLE) ||
10601                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10602                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10603                                                    BMCR_ANENABLE);
10604                         r = 0;
10605                 }
10606                 spin_unlock_bh(&tp->lock);
10607         }
10608
10609         return r;
10610 }
10611
10612 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10613 {
10614         struct tg3 *tp = netdev_priv(dev);
10615
10616         ering->rx_max_pending = tp->rx_std_ring_mask;
10617         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10618                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10619         else
10620                 ering->rx_jumbo_max_pending = 0;
10621
10622         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10623
10624         ering->rx_pending = tp->rx_pending;
10625         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10626                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10627         else
10628                 ering->rx_jumbo_pending = 0;
10629
10630         ering->tx_pending = tp->napi[0].tx_pending;
10631 }
10632
10633 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10634 {
10635         struct tg3 *tp = netdev_priv(dev);
10636         int i, irq_sync = 0, err = 0;
10637
10638         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10639             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10640             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10641             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10642             (tg3_flag(tp, TSO_BUG) &&
10643              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10644                 return -EINVAL;
10645
10646         if (netif_running(dev)) {
10647                 tg3_phy_stop(tp);
10648                 tg3_netif_stop(tp);
10649                 irq_sync = 1;
10650         }
10651
10652         tg3_full_lock(tp, irq_sync);
10653
10654         tp->rx_pending = ering->rx_pending;
10655
10656         if (tg3_flag(tp, MAX_RXPEND_64) &&
10657             tp->rx_pending > 63)
10658                 tp->rx_pending = 63;
10659         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10660
10661         for (i = 0; i < tp->irq_max; i++)
10662                 tp->napi[i].tx_pending = ering->tx_pending;
10663
10664         if (netif_running(dev)) {
10665                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10666                 err = tg3_restart_hw(tp, 1);
10667                 if (!err)
10668                         tg3_netif_start(tp);
10669         }
10670
10671         tg3_full_unlock(tp);
10672
10673         if (irq_sync && !err)
10674                 tg3_phy_start(tp);
10675
10676         return err;
10677 }
10678
10679 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10680 {
10681         struct tg3 *tp = netdev_priv(dev);
10682
10683         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10684
10685         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10686                 epause->rx_pause = 1;
10687         else
10688                 epause->rx_pause = 0;
10689
10690         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10691                 epause->tx_pause = 1;
10692         else
10693                 epause->tx_pause = 0;
10694 }
10695
10696 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10697 {
10698         struct tg3 *tp = netdev_priv(dev);
10699         int err = 0;
10700
10701         if (tg3_flag(tp, USE_PHYLIB)) {
10702                 u32 newadv;
10703                 struct phy_device *phydev;
10704
10705                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10706
10707                 if (!(phydev->supported & SUPPORTED_Pause) ||
10708                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10709                      (epause->rx_pause != epause->tx_pause)))
10710                         return -EINVAL;
10711
10712                 tp->link_config.flowctrl = 0;
10713                 if (epause->rx_pause) {
10714                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10715
10716                         if (epause->tx_pause) {
10717                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10718                                 newadv = ADVERTISED_Pause;
10719                         } else
10720                                 newadv = ADVERTISED_Pause |
10721                                          ADVERTISED_Asym_Pause;
10722                 } else if (epause->tx_pause) {
10723                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10724                         newadv = ADVERTISED_Asym_Pause;
10725                 } else
10726                         newadv = 0;
10727
10728                 if (epause->autoneg)
10729                         tg3_flag_set(tp, PAUSE_AUTONEG);
10730                 else
10731                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10732
10733                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10734                         u32 oldadv = phydev->advertising &
10735                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10736                         if (oldadv != newadv) {
10737                                 phydev->advertising &=
10738                                         ~(ADVERTISED_Pause |
10739                                           ADVERTISED_Asym_Pause);
10740                                 phydev->advertising |= newadv;
10741                                 if (phydev->autoneg) {
10742                                         /*
10743                                          * Always renegotiate the link to
10744                                          * inform our link partner of our
10745                                          * flow control settings, even if the
10746                                          * flow control is forced.  Let
10747                                          * tg3_adjust_link() do the final
10748                                          * flow control setup.
10749                                          */
10750                                         return phy_start_aneg(phydev);
10751                                 }
10752                         }
10753
10754                         if (!epause->autoneg)
10755                                 tg3_setup_flow_control(tp, 0, 0);
10756                 } else {
10757                         tp->link_config.orig_advertising &=
10758                                         ~(ADVERTISED_Pause |
10759                                           ADVERTISED_Asym_Pause);
10760                         tp->link_config.orig_advertising |= newadv;
10761                 }
10762         } else {
10763                 int irq_sync = 0;
10764
10765                 if (netif_running(dev)) {
10766                         tg3_netif_stop(tp);
10767                         irq_sync = 1;
10768                 }
10769
10770                 tg3_full_lock(tp, irq_sync);
10771
10772                 if (epause->autoneg)
10773                         tg3_flag_set(tp, PAUSE_AUTONEG);
10774                 else
10775                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10776                 if (epause->rx_pause)
10777                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10778                 else
10779                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10780                 if (epause->tx_pause)
10781                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10782                 else
10783                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10784
10785                 if (netif_running(dev)) {
10786                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10787                         err = tg3_restart_hw(tp, 1);
10788                         if (!err)
10789                                 tg3_netif_start(tp);
10790                 }
10791
10792                 tg3_full_unlock(tp);
10793         }
10794
10795         return err;
10796 }
10797
10798 static int tg3_get_sset_count(struct net_device *dev, int sset)
10799 {
10800         switch (sset) {
10801         case ETH_SS_TEST:
10802                 return TG3_NUM_TEST;
10803         case ETH_SS_STATS:
10804                 return TG3_NUM_STATS;
10805         default:
10806                 return -EOPNOTSUPP;
10807         }
10808 }
10809
10810 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10811 {
10812         switch (stringset) {
10813         case ETH_SS_STATS:
10814                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10815                 break;
10816         case ETH_SS_TEST:
10817                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10818                 break;
10819         default:
10820                 WARN_ON(1);     /* we need a WARN() */
10821                 break;
10822         }
10823 }
10824
10825 static int tg3_set_phys_id(struct net_device *dev,
10826                             enum ethtool_phys_id_state state)
10827 {
10828         struct tg3 *tp = netdev_priv(dev);
10829
10830         if (!netif_running(tp->dev))
10831                 return -EAGAIN;
10832
10833         switch (state) {
10834         case ETHTOOL_ID_ACTIVE:
10835                 return 1;       /* cycle on/off once per second */
10836
10837         case ETHTOOL_ID_ON:
10838                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10839                      LED_CTRL_1000MBPS_ON |
10840                      LED_CTRL_100MBPS_ON |
10841                      LED_CTRL_10MBPS_ON |
10842                      LED_CTRL_TRAFFIC_OVERRIDE |
10843                      LED_CTRL_TRAFFIC_BLINK |
10844                      LED_CTRL_TRAFFIC_LED);
10845                 break;
10846
10847         case ETHTOOL_ID_OFF:
10848                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10849                      LED_CTRL_TRAFFIC_OVERRIDE);
10850                 break;
10851
10852         case ETHTOOL_ID_INACTIVE:
10853                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10854                 break;
10855         }
10856
10857         return 0;
10858 }
10859
10860 static void tg3_get_ethtool_stats(struct net_device *dev,
10861                                    struct ethtool_stats *estats, u64 *tmp_stats)
10862 {
10863         struct tg3 *tp = netdev_priv(dev);
10864         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10865 }
10866
10867 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10868 {
10869         int i;
10870         __be32 *buf;
10871         u32 offset = 0, len = 0;
10872         u32 magic, val;
10873
10874         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10875                 return NULL;
10876
10877         if (magic == TG3_EEPROM_MAGIC) {
10878                 for (offset = TG3_NVM_DIR_START;
10879                      offset < TG3_NVM_DIR_END;
10880                      offset += TG3_NVM_DIRENT_SIZE) {
10881                         if (tg3_nvram_read(tp, offset, &val))
10882                                 return NULL;
10883
10884                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10885                             TG3_NVM_DIRTYPE_EXTVPD)
10886                                 break;
10887                 }
10888
10889                 if (offset != TG3_NVM_DIR_END) {
10890                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10891                         if (tg3_nvram_read(tp, offset + 4, &offset))
10892                                 return NULL;
10893
10894                         offset = tg3_nvram_logical_addr(tp, offset);
10895                 }
10896         }
10897
10898         if (!offset || !len) {
10899                 offset = TG3_NVM_VPD_OFF;
10900                 len = TG3_NVM_VPD_LEN;
10901         }
10902
10903         buf = kmalloc(len, GFP_KERNEL);
10904         if (buf == NULL)
10905                 return NULL;
10906
10907         if (magic == TG3_EEPROM_MAGIC) {
10908                 for (i = 0; i < len; i += 4) {
10909                         /* The data is in little-endian format in NVRAM.
10910                          * Use the big-endian read routines to preserve
10911                          * the byte order as it exists in NVRAM.
10912                          */
10913                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10914                                 goto error;
10915                 }
10916         } else {
10917                 u8 *ptr;
10918                 ssize_t cnt;
10919                 unsigned int pos = 0;
10920
10921                 ptr = (u8 *)&buf[0];
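                /* pci_read_vpd() may return short reads; retry for up to
                 * three passes, treating a timeout or signal as a
                 * zero-byte read.
                 */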
10922                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10923                         cnt = pci_read_vpd(tp->pdev, pos,
10924                                            len - pos, ptr);
10925                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10926                                 cnt = 0;
10927                         else if (cnt < 0)
10928                                 goto error;
10929                 }
10930                 if (pos != len)
10931                         goto error;
10932         }
10933
10934         *vpdlen = len;
10935
10936         return buf;
10937
10938 error:
10939         kfree(buf);
10940         return NULL;
10941 }
10942
10943 #define NVRAM_TEST_SIZE 0x100
10944 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10945 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10946 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10947 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10948 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10949 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10950 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10951 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10952
10953 static int tg3_test_nvram(struct tg3 *tp)
10954 {
10955         u32 csum, magic, len;
10956         __be32 *buf;
10957         int i, j, k, err = 0, size;
10958
10959         if (tg3_flag(tp, NO_NVRAM))
10960                 return 0;
10961
10962         if (tg3_nvram_read(tp, 0, &magic) != 0)
10963                 return -EIO;
10964
10965         if (magic == TG3_EEPROM_MAGIC)
10966                 size = NVRAM_TEST_SIZE;
10967         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10968                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10969                     TG3_EEPROM_SB_FORMAT_1) {
10970                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10971                         case TG3_EEPROM_SB_REVISION_0:
10972                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10973                                 break;
10974                         case TG3_EEPROM_SB_REVISION_2:
10975                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10976                                 break;
10977                         case TG3_EEPROM_SB_REVISION_3:
10978                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10979                                 break;
10980                         case TG3_EEPROM_SB_REVISION_4:
10981                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10982                                 break;
10983                         case TG3_EEPROM_SB_REVISION_5:
10984                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10985                                 break;
10986                         case TG3_EEPROM_SB_REVISION_6:
10987                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10988                                 break;
10989                         default:
10990                                 return -EIO;
10991                         }
10992                 } else
10993                         return 0;
10994         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10995                 size = NVRAM_SELFBOOT_HW_SIZE;
10996         else
10997                 return -EIO;
10998
10999         buf = kmalloc(size, GFP_KERNEL);
11000         if (buf == NULL)
11001                 return -ENOMEM;
11002
11003         err = -EIO;
11004         for (i = 0, j = 0; i < size; i += 4, j++) {
11005                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11006                 if (err)
11007                         break;
11008         }
11009         if (i < size)
11010                 goto out;
11011
11012         /* Selfboot format */
11013         magic = be32_to_cpu(buf[0]);
11014         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11015             TG3_EEPROM_MAGIC_FW) {
11016                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11017
11018                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11019                     TG3_EEPROM_SB_REVISION_2) {
11020                         /* For rev 2, the csum doesn't include the MBA. */
11021                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11022                                 csum8 += buf8[i];
11023                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11024                                 csum8 += buf8[i];
11025                 } else {
11026                         for (i = 0; i < size; i++)
11027                                 csum8 += buf8[i];
11028                 }
11029
11030                 if (csum8 == 0) {
11031                         err = 0;
11032                         goto out;
11033                 }
11034
11035                 err = -EIO;
11036                 goto out;
11037         }
11038
11039         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11040             TG3_EEPROM_MAGIC_HW) {
11041                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11042                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11043                 u8 *buf8 = (u8 *) buf;
11044
11045                 /* Separate the parity bits and the data bytes.  */
11046                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11047                         if ((i == 0) || (i == 8)) {
11048                                 int l;
11049                                 u8 msk;
11050
11051                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11052                                         parity[k++] = buf8[i] & msk;
11053                                 i++;
11054                         } else if (i == 16) {
11055                                 int l;
11056                                 u8 msk;
11057
11058                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11059                                         parity[k++] = buf8[i] & msk;
11060                                 i++;
11061
11062                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11063                                         parity[k++] = buf8[i] & msk;
11064                                 i++;
11065                         }
11066                         data[j++] = buf8[i];
11067                 }
11068
11069                 err = -EIO;
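                /* The loop below enforces odd parity across each data byte
                 * plus its stored parity bit: a byte with an odd popcount
                 * must have a cleared parity bit, an even popcount a set
                 * one; any other combination fails.
                 */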
11070                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11071                         u8 hw8 = hweight8(data[i]);
11072
11073                         if ((hw8 & 0x1) && parity[i])
11074                                 goto out;
11075                         else if (!(hw8 & 0x1) && !parity[i])
11076                                 goto out;
11077                 }
11078                 err = 0;
11079                 goto out;
11080         }
11081
11082         err = -EIO;
11083
11084         /* Bootstrap checksum at offset 0x10 */
11085         csum = calc_crc((unsigned char *) buf, 0x10);
11086         if (csum != le32_to_cpu(buf[0x10/4]))
11087                 goto out;
11088
11089         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11090         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11091         if (csum != le32_to_cpu(buf[0xfc/4]))
11092                 goto out;
11093
11094         kfree(buf);
11095
11096         buf = tg3_vpd_readblock(tp, &len);
11097         if (!buf)
11098                 return -ENOMEM;
11099
11100         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11101         if (i > 0) {
11102                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11103                 if (j < 0)
11104                         goto out;
11105
11106                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11107                         goto out;
11108
11109                 i += PCI_VPD_LRDT_TAG_SIZE;
11110                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11111                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11112                 if (j > 0) {
11113                         u8 csum8 = 0;
11114
11115                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11116
11117                         for (i = 0; i <= j; i++)
11118                                 csum8 += ((u8 *)buf)[i];
11119
11120                         if (csum8)
11121                                 goto out;
11122                 }
11123         }
11124
11125         err = 0;
11126
11127 out:
11128         kfree(buf);
11129         return err;
11130 }
11131
11132 #define TG3_SERDES_TIMEOUT_SEC  2
11133 #define TG3_COPPER_TIMEOUT_SEC  6
11134
11135 static int tg3_test_link(struct tg3 *tp)
11136 {
11137         int i, max;
11138
11139         if (!netif_running(tp->dev))
11140                 return -ENODEV;
11141
11142         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11143                 max = TG3_SERDES_TIMEOUT_SEC;
11144         else
11145                 max = TG3_COPPER_TIMEOUT_SEC;
11146
11147         for (i = 0; i < max; i++) {
11148                 if (netif_carrier_ok(tp->dev))
11149                         return 0;
11150
11151                 if (msleep_interruptible(1000))
11152                         break;
11153         }
11154
11155         return -EIO;
11156 }
11157
11158 /* Only test the commonly used registers */
11159 static int tg3_test_registers(struct tg3 *tp)
11160 {
11161         int i, is_5705, is_5750;
11162         u32 offset, read_mask, write_mask, val, save_val, read_val;
11163         static struct {
11164                 u16 offset;
11165                 u16 flags;
11166 #define TG3_FL_5705     0x1
11167 #define TG3_FL_NOT_5705 0x2
11168 #define TG3_FL_NOT_5788 0x4
11169 #define TG3_FL_NOT_5750 0x8
11170                 u32 read_mask;
11171                 u32 write_mask;
11172         } reg_tbl[] = {
11173                 /* MAC Control Registers */
11174                 { MAC_MODE, TG3_FL_NOT_5705,
11175                         0x00000000, 0x00ef6f8c },
11176                 { MAC_MODE, TG3_FL_5705,
11177                         0x00000000, 0x01ef6b8c },
11178                 { MAC_STATUS, TG3_FL_NOT_5705,
11179                         0x03800107, 0x00000000 },
11180                 { MAC_STATUS, TG3_FL_5705,
11181                         0x03800100, 0x00000000 },
11182                 { MAC_ADDR_0_HIGH, 0x0000,
11183                         0x00000000, 0x0000ffff },
11184                 { MAC_ADDR_0_LOW, 0x0000,
11185                         0x00000000, 0xffffffff },
11186                 { MAC_RX_MTU_SIZE, 0x0000,
11187                         0x00000000, 0x0000ffff },
11188                 { MAC_TX_MODE, 0x0000,
11189                         0x00000000, 0x00000070 },
11190                 { MAC_TX_LENGTHS, 0x0000,
11191                         0x00000000, 0x00003fff },
11192                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11193                         0x00000000, 0x000007fc },
11194                 { MAC_RX_MODE, TG3_FL_5705,
11195                         0x00000000, 0x000007dc },
11196                 { MAC_HASH_REG_0, 0x0000,
11197                         0x00000000, 0xffffffff },
11198                 { MAC_HASH_REG_1, 0x0000,
11199                         0x00000000, 0xffffffff },
11200                 { MAC_HASH_REG_2, 0x0000,
11201                         0x00000000, 0xffffffff },
11202                 { MAC_HASH_REG_3, 0x0000,
11203                         0x00000000, 0xffffffff },
11204
11205                 /* Receive Data and Receive BD Initiator Control Registers. */
11206                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11207                         0x00000000, 0xffffffff },
11208                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11209                         0x00000000, 0xffffffff },
11210                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11211                         0x00000000, 0x00000003 },
11212                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11213                         0x00000000, 0xffffffff },
11214                 { RCVDBDI_STD_BD+0, 0x0000,
11215                         0x00000000, 0xffffffff },
11216                 { RCVDBDI_STD_BD+4, 0x0000,
11217                         0x00000000, 0xffffffff },
11218                 { RCVDBDI_STD_BD+8, 0x0000,
11219                         0x00000000, 0xffff0002 },
11220                 { RCVDBDI_STD_BD+0xc, 0x0000,
11221                         0x00000000, 0xffffffff },
11222
11223                 /* Receive BD Initiator Control Registers. */
11224                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11225                         0x00000000, 0xffffffff },
11226                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11227                         0x00000000, 0x000003ff },
11228                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11229                         0x00000000, 0xffffffff },
11230
11231                 /* Host Coalescing Control Registers. */
11232                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11233                         0x00000000, 0x00000004 },
11234                 { HOSTCC_MODE, TG3_FL_5705,
11235                         0x00000000, 0x000000f6 },
11236                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11237                         0x00000000, 0xffffffff },
11238                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11239                         0x00000000, 0x000003ff },
11240                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11241                         0x00000000, 0xffffffff },
11242                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11243                         0x00000000, 0x000003ff },
11244                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11245                         0x00000000, 0xffffffff },
11246                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11247                         0x00000000, 0x000000ff },
11248                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11249                         0x00000000, 0xffffffff },
11250                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11251                         0x00000000, 0x000000ff },
11252                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11253                         0x00000000, 0xffffffff },
11254                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11255                         0x00000000, 0xffffffff },
11256                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11257                         0x00000000, 0xffffffff },
11258                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11259                         0x00000000, 0x000000ff },
11260                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11261                         0x00000000, 0xffffffff },
11262                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11263                         0x00000000, 0x000000ff },
11264                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11265                         0x00000000, 0xffffffff },
11266                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11267                         0x00000000, 0xffffffff },
11268                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11269                         0x00000000, 0xffffffff },
11270                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11271                         0x00000000, 0xffffffff },
11272                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11273                         0x00000000, 0xffffffff },
11274                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11275                         0xffffffff, 0x00000000 },
11276                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11277                         0xffffffff, 0x00000000 },
11278
11279                 /* Buffer Manager Control Registers. */
11280                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11281                         0x00000000, 0x007fff80 },
11282                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11283                         0x00000000, 0x007fffff },
11284                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11285                         0x00000000, 0x0000003f },
11286                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11287                         0x00000000, 0x000001ff },
11288                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11289                         0x00000000, 0x000001ff },
11290                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11291                         0xffffffff, 0x00000000 },
11292                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11293                         0xffffffff, 0x00000000 },
11294
11295                 /* Mailbox Registers */
11296                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11297                         0x00000000, 0x000001ff },
11298                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11299                         0x00000000, 0x000001ff },
11300                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11301                         0x00000000, 0x000007ff },
11302                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11303                         0x00000000, 0x000001ff },
11304
11305                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11306         };
11307
11308         is_5705 = is_5750 = 0;
11309         if (tg3_flag(tp, 5705_PLUS)) {
11310                 is_5705 = 1;
11311                 if (tg3_flag(tp, 5750_PLUS))
11312                         is_5750 = 1;
11313         }
11314
11315         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11316                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11317                         continue;
11318
11319                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11320                         continue;
11321
11322                 if (tg3_flag(tp, IS_5788) &&
11323                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11324                         continue;
11325
11326                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11327                         continue;
11328
11329                 offset = (u32) reg_tbl[i].offset;
11330                 read_mask = reg_tbl[i].read_mask;
11331                 write_mask = reg_tbl[i].write_mask;
11332
11333                 /* Save the original register content */
11334                 save_val = tr32(offset);
11335
11336                 /* Determine the read-only value. */
11337                 read_val = save_val & read_mask;
11338
11339                 /* Write zero to the register, then make sure the read-only bits
11340                  * are not changed and the read/write bits are all zeros.
11341                  */
11342                 tw32(offset, 0);
11343
11344                 val = tr32(offset);
11345
11346                 /* Test the read-only and read/write bits. */
11347                 if (((val & read_mask) != read_val) || (val & write_mask))
11348                         goto out;
11349
11350                 /* Write ones to all the bits defined by read_mask and
11351                  * write_mask, then make sure the read-only bits are not
11352                  * changed and the read/write bits are all ones.
11353                  */
11354                 tw32(offset, read_mask | write_mask);
11355
11356                 val = tr32(offset);
11357
11358                 /* Test the read-only bits. */
11359                 if ((val & read_mask) != read_val)
11360                         goto out;
11361
11362                 /* Test the read/write bits. */
11363                 if ((val & write_mask) != write_mask)
11364                         goto out;
11365
11366                 tw32(offset, save_val);
11367         }
11368
11369         return 0;
11370
11371 out:
11372         if (netif_msg_hw(tp))
11373                 netdev_err(tp->dev,
11374                            "Register test failed at offset %x\n", offset);
11375         tw32(offset, save_val);
11376         return -EIO;
11377 }
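/* Example of the mask scheme above: MAC_MODE on a 5705-class chip has
 * read_mask 0x00000000 and write_mask 0x01ef6b8c.  Writing 0 must read
 * back with every write_mask bit clear; writing read_mask | write_mask
 * must read back with every write_mask bit set.  In both cases the
 * bits under read_mask must still equal the value sampled before the
 * test, i.e. writes must not disturb read-only bits.
 */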
11378
11379 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11380 {
11381         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11382         int i;
11383         u32 j;
11384
11385         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11386                 for (j = 0; j < len; j += 4) {
11387                         u32 val;
11388
11389                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11390                         tg3_read_mem(tp, offset + j, &val);
11391                         if (val != test_pattern[i])
11392                                 return -EIO;
11393                 }
11394         }
11395         return 0;
11396 }
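/* The patterns above exercise both polarities of every cell (all
 * zeros, all ones) plus an alternating pattern (0xaa55a55a) that helps
 * catch adjacent data lines that are shorted or cross-coupled.
 */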
11397
11398 static int tg3_test_memory(struct tg3 *tp)
11399 {
11400         static struct mem_entry {
11401                 u32 offset;
11402                 u32 len;
11403         } mem_tbl_570x[] = {
11404                 { 0x00000000, 0x00b50},
11405                 { 0x00002000, 0x1c000},
11406                 { 0xffffffff, 0x00000}
11407         }, mem_tbl_5705[] = {
11408                 { 0x00000100, 0x0000c},
11409                 { 0x00000200, 0x00008},
11410                 { 0x00004000, 0x00800},
11411                 { 0x00006000, 0x01000},
11412                 { 0x00008000, 0x02000},
11413                 { 0x00010000, 0x0e000},
11414                 { 0xffffffff, 0x00000}
11415         }, mem_tbl_5755[] = {
11416                 { 0x00000200, 0x00008},
11417                 { 0x00004000, 0x00800},
11418                 { 0x00006000, 0x00800},
11419                 { 0x00008000, 0x02000},
11420                 { 0x00010000, 0x0c000},
11421                 { 0xffffffff, 0x00000}
11422         }, mem_tbl_5906[] = {
11423                 { 0x00000200, 0x00008},
11424                 { 0x00004000, 0x00400},
11425                 { 0x00006000, 0x00400},
11426                 { 0x00008000, 0x01000},
11427                 { 0x00010000, 0x01000},
11428                 { 0xffffffff, 0x00000}
11429         }, mem_tbl_5717[] = {
11430                 { 0x00000200, 0x00008},
11431                 { 0x00010000, 0x0a000},
11432                 { 0x00020000, 0x13c00},
11433                 { 0xffffffff, 0x00000}
11434         }, mem_tbl_57765[] = {
11435                 { 0x00000200, 0x00008},
11436                 { 0x00004000, 0x00800},
11437                 { 0x00006000, 0x09800},
11438                 { 0x00010000, 0x0a000},
11439                 { 0xffffffff, 0x00000}
11440         };
11441         struct mem_entry *mem_tbl;
11442         int err = 0;
11443         int i;
11444
11445         if (tg3_flag(tp, 5717_PLUS))
11446                 mem_tbl = mem_tbl_5717;
11447         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11448                 mem_tbl = mem_tbl_57765;
11449         else if (tg3_flag(tp, 5755_PLUS))
11450                 mem_tbl = mem_tbl_5755;
11451         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11452                 mem_tbl = mem_tbl_5906;
11453         else if (tg3_flag(tp, 5705_PLUS))
11454                 mem_tbl = mem_tbl_5705;
11455         else
11456                 mem_tbl = mem_tbl_570x;
11457
11458         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11459                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11460                 if (err)
11461                         break;
11462         }
11463
11464         return err;
11465 }
11466
11467 #define TG3_TSO_MSS             500
11468
11469 #define TG3_TSO_IP_HDR_LEN      20
11470 #define TG3_TSO_TCP_HDR_LEN     20
11471 #define TG3_TSO_TCP_OPT_LEN     12
11472
11473 static const u8 tg3_tso_header[] = {
11474 0x08, 0x00,
11475 0x45, 0x00, 0x00, 0x00,
11476 0x00, 0x00, 0x40, 0x00,
11477 0x40, 0x06, 0x00, 0x00,
11478 0x0a, 0x00, 0x00, 0x01,
11479 0x0a, 0x00, 0x00, 0x02,
11480 0x0d, 0x00, 0xe0, 0x00,
11481 0x00, 0x00, 0x01, 0x00,
11482 0x00, 0x00, 0x02, 0x00,
11483 0x80, 0x10, 0x10, 0x00,
11484 0x14, 0x09, 0x00, 0x00,
11485 0x01, 0x01, 0x08, 0x0a,
11486 0x11, 0x11, 0x11, 0x11,
11487 0x11, 0x11, 0x11, 0x11,
11488 };
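/* Decoded, the canned header above is: the 2-byte ethertype 0x0800
 * (IPv4), which follows the 12 MAC-address bytes built separately; a
 * 20-byte IPv4 header (TTL 64, protocol 6/TCP, 10.0.0.1 -> 10.0.0.2,
 * tot_len patched in at run time); and a 32-byte TCP header (data
 * offset 8, ACK set) whose final 12 bytes are NOP, NOP and a timestamp
 * option, i.e. TG3_TSO_TCP_HDR_LEN plus TG3_TSO_TCP_OPT_LEN bytes.
 */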
11489
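/* Flow of tg3_run_loopback() below: build one self-addressed frame (or
 * the canned TSO super-frame above), post a single tx descriptor, kick
 * the producer mailbox, then poll the status block until the tx
 * consumer index catches up and the expected number of rx packets has
 * arrived.  Finally, walk the rx return ring and compare every payload
 * byte against the "i & 0xff" fill pattern written on the tx side.
 */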
11490 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11491 {
11492         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11493         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11494         u32 budget;
11495         struct sk_buff *skb, *rx_skb;
11496         u8 *tx_data;
11497         dma_addr_t map;
11498         int num_pkts, tx_len, rx_len, i, err;
11499         struct tg3_rx_buffer_desc *desc;
11500         struct tg3_napi *tnapi, *rnapi;
11501         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11502
11503         tnapi = &tp->napi[0];
11504         rnapi = &tp->napi[0];
11505         if (tp->irq_cnt > 1) {
11506                 if (tg3_flag(tp, ENABLE_RSS))
11507                         rnapi = &tp->napi[1];
11508                 if (tg3_flag(tp, ENABLE_TSS))
11509                         tnapi = &tp->napi[1];
11510         }
11511         coal_now = tnapi->coal_now | rnapi->coal_now;
11512
11513         err = -EIO;
11514
11515         tx_len = pktsz;
11516         skb = netdev_alloc_skb(tp->dev, tx_len);
11517         if (!skb)
11518                 return -ENOMEM;
11519
11520         tx_data = skb_put(skb, tx_len);
11521         memcpy(tx_data, tp->dev->dev_addr, 6);
11522         memset(tx_data + 6, 0x0, 8);
11523
11524         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11525
11526         if (tso_loopback) {
11527                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11528
11529                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11530                               TG3_TSO_TCP_OPT_LEN;
11531
11532                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11533                        sizeof(tg3_tso_header));
11534                 mss = TG3_TSO_MSS;
11535
11536                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11537                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11538
11539                 /* Set the total length field in the IP header */
11540                 iph->tot_len = htons((u16)(mss + hdr_len));
11541
11542                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11543                               TXD_FLAG_CPU_POST_DMA);
11544
11545                 if (tg3_flag(tp, HW_TSO_1) ||
11546                     tg3_flag(tp, HW_TSO_2) ||
11547                     tg3_flag(tp, HW_TSO_3)) {
11548                         struct tcphdr *th;
11549                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11550                         th = (struct tcphdr *)&tx_data[val];
11551                         th->check = 0;
11552                 } else
11553                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11554
11555                 if (tg3_flag(tp, HW_TSO_3)) {
11556                         mss |= (hdr_len & 0xc) << 12;
11557                         if (hdr_len & 0x10)
11558                                 base_flags |= 0x00000010;
11559                         base_flags |= (hdr_len & 0x3e0) << 5;
11560                 } else if (tg3_flag(tp, HW_TSO_2))
11561                         mss |= hdr_len << 9;
11562                 else if (tg3_flag(tp, HW_TSO_1) ||
11563                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11564                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11565                 } else {
11566                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11567                 }
11568
11569                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11570         } else {
11571                 num_pkts = 1;
11572                 data_off = ETH_HLEN;
11573         }
11574
11575         for (i = data_off; i < tx_len; i++)
11576                 tx_data[i] = (u8) (i & 0xff);
11577
11578         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11579         if (pci_dma_mapping_error(tp->pdev, map)) {
11580                 dev_kfree_skb(skb);
11581                 return -EIO;
11582         }
11583
11584         val = tnapi->tx_prod;
11585         tnapi->tx_buffers[val].skb = skb;
11586         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11587
11588         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11589                rnapi->coal_now);
11590
11591         udelay(10);
11592
11593         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11594
11595         budget = tg3_tx_avail(tnapi);
11596         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11597                             base_flags | TXD_FLAG_END, mss, 0)) {
11598                 tnapi->tx_buffers[val].skb = NULL;
11599                 dev_kfree_skb(skb);
11600                 return -EIO;
11601         }
11602
11603         tnapi->tx_prod++;
11604
11605         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11606         tr32_mailbox(tnapi->prodmbox);
11607
11608         udelay(10);
11609
11610         /* Poll up to 35 times at 10 usec intervals (350 usec total) to allow enough time on some 10/100 Mbps devices.  */
11611         for (i = 0; i < 35; i++) {
11612                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11613                        coal_now);
11614
11615                 udelay(10);
11616
11617                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11618                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11619                 if ((tx_idx == tnapi->tx_prod) &&
11620                     (rx_idx == (rx_start_idx + num_pkts)))
11621                         break;
11622         }
11623
11624         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11625         dev_kfree_skb(skb);
11626
11627         if (tx_idx != tnapi->tx_prod)
11628                 goto out;
11629
11630         if (rx_idx != rx_start_idx + num_pkts)
11631                 goto out;
11632
11633         val = data_off;
11634         while (rx_idx != rx_start_idx) {
11635                 desc = &rnapi->rx_rcb[rx_start_idx++];
11636                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11637                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11638
11639                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11640                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11641                         goto out;
11642
11643                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11644                          - ETH_FCS_LEN;
11645
11646                 if (!tso_loopback) {
11647                         if (rx_len != tx_len)
11648                                 goto out;
11649
11650                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11651                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11652                                         goto out;
11653                         } else {
11654                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11655                                         goto out;
11656                         }
11657                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11658                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11659                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11660                         goto out;
11661                 }
11662
11663                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11664                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11665                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11666                                              mapping);
11667                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11668                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11669                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11670                                              mapping);
11671                 } else
11672                         goto out;
11673
11674                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11675                                             PCI_DMA_FROMDEVICE);
11676
11677                 for (i = data_off; i < rx_len; i++, val++) {
11678                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11679                                 goto out;
11680                 }
11681         }
11682
11683         err = 0;
11684
11685         /* tg3_free_rings will unmap and free the rx_skb */
11686 out:
11687         return err;
11688 }
11689
11690 #define TG3_STD_LOOPBACK_FAILED         1
11691 #define TG3_JMB_LOOPBACK_FAILED         2
11692 #define TG3_TSO_LOOPBACK_FAILED         4
11693 #define TG3_LOOPBACK_FAILED \
11694         (TG3_STD_LOOPBACK_FAILED | \
11695          TG3_JMB_LOOPBACK_FAILED | \
11696          TG3_TSO_LOOPBACK_FAILED)
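/* Each u64 result word (data[0] MAC loopback, data[1] internal PHY
 * loopback, data[2] external PHY loopback) accumulates an OR of the
 * failure bits above.
 */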
11697
11698 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11699 {
11700         int err = -EIO;
11701         u32 eee_cap;
11702
11703         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11704         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11705
11706         if (!netif_running(tp->dev)) {
11707                 data[0] = TG3_LOOPBACK_FAILED;
11708                 data[1] = TG3_LOOPBACK_FAILED;
11709                 if (do_extlpbk)
11710                         data[2] = TG3_LOOPBACK_FAILED;
11711                 goto done;
11712         }
11713
11714         err = tg3_reset_hw(tp, 1);
11715         if (err) {
11716                 data[0] = TG3_LOOPBACK_FAILED;
11717                 data[1] = TG3_LOOPBACK_FAILED;
11718                 if (do_extlpbk)
11719                         data[2] = TG3_LOOPBACK_FAILED;
11720                 goto done;
11721         }
11722
11723         if (tg3_flag(tp, ENABLE_RSS)) {
11724                 int i;
11725
11726                 /* Reroute all rx packets to the 1st queue */
11727                 for (i = MAC_RSS_INDIR_TBL_0;
11728                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11729                         tw32(i, 0x0);
11730         }
11731
11732         /* HW errata - mac loopback fails in some cases on 5780.
11733          * Normal traffic and PHY loopback are not affected by
11734          * errata.  Also, the MAC loopback test is deprecated for
11735          * all newer ASIC revisions.
11736          */
11737         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11738             !tg3_flag(tp, CPMU_PRESENT)) {
11739                 tg3_mac_loopback(tp, true);
11740
11741                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11742                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11743
11744                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11745                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11746                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11747
11748                 tg3_mac_loopback(tp, false);
11749         }
11750
11751         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11752             !tg3_flag(tp, USE_PHYLIB)) {
11753                 int i;
11754
11755                 tg3_phy_lpbk_set(tp, 0, false);
11756
11757                 /* Wait for link */
11758                 for (i = 0; i < 100; i++) {
11759                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11760                                 break;
11761                         mdelay(1);
11762                 }
11763
11764                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11765                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11766                 if (tg3_flag(tp, TSO_CAPABLE) &&
11767                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11768                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11769                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11770                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11771                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11772
11773                 if (do_extlpbk) {
11774                         tg3_phy_lpbk_set(tp, 0, true);
11775
11776                         /* All link indications report up, but the hardware
11777                          * isn't really ready for about 20 msec.  Double it
11778                          * to be sure.
11779                          */
11780                         mdelay(40);
11781
11782                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11783                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11784                         if (tg3_flag(tp, TSO_CAPABLE) &&
11785                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11786                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11787                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11788                             tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11789                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11790                 }
11791
11792                 /* Re-enable gphy autopowerdown. */
11793                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11794                         tg3_phy_toggle_apd(tp, true);
11795         }
11796
11797         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11798
11799 done:
11800         tp->phy_flags |= eee_cap;
11801
11802         return err;
11803 }
11804
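/* tg3_self_test() is invoked through ethtool_ops.self_test, e.g. by
 * "ethtool -t eth0 offline" from userspace.  The result slots match
 * the driver's ethtool test strings: data[0] nvram, data[1] link,
 * data[2] registers, data[3] memory, data[4..6] the loopback words
 * filled in by tg3_test_loopback(), and data[7] interrupt.
 */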
11805 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11806                           u64 *data)
11807 {
11808         struct tg3 *tp = netdev_priv(dev);
11809         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11810
11811         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11812             tg3_power_up(tp)) {
11813                 etest->flags |= ETH_TEST_FL_FAILED;
11814                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11815                 return;
11816         }
11817
11818         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11819
11820         if (tg3_test_nvram(tp) != 0) {
11821                 etest->flags |= ETH_TEST_FL_FAILED;
11822                 data[0] = 1;
11823         }
11824         if (!doextlpbk && tg3_test_link(tp)) {
11825                 etest->flags |= ETH_TEST_FL_FAILED;
11826                 data[1] = 1;
11827         }
11828         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11829                 int err, err2 = 0, irq_sync = 0;
11830
11831                 if (netif_running(dev)) {
11832                         tg3_phy_stop(tp);
11833                         tg3_netif_stop(tp);
11834                         irq_sync = 1;
11835                 }
11836
11837                 tg3_full_lock(tp, irq_sync);
11838
11839                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11840                 err = tg3_nvram_lock(tp);
11841                 tg3_halt_cpu(tp, RX_CPU_BASE);
11842                 if (!tg3_flag(tp, 5705_PLUS))
11843                         tg3_halt_cpu(tp, TX_CPU_BASE);
11844                 if (!err)
11845                         tg3_nvram_unlock(tp);
11846
11847                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11848                         tg3_phy_reset(tp);
11849
11850                 if (tg3_test_registers(tp) != 0) {
11851                         etest->flags |= ETH_TEST_FL_FAILED;
11852                         data[2] = 1;
11853                 }
11854
11855                 if (tg3_test_memory(tp) != 0) {
11856                         etest->flags |= ETH_TEST_FL_FAILED;
11857                         data[3] = 1;
11858                 }
11859
11860                 if (doextlpbk)
11861                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11862
11863                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11864                         etest->flags |= ETH_TEST_FL_FAILED;
11865
11866                 tg3_full_unlock(tp);
11867
11868                 if (tg3_test_interrupt(tp) != 0) {
11869                         etest->flags |= ETH_TEST_FL_FAILED;
11870                         data[7] = 1;
11871                 }
11872
11873                 tg3_full_lock(tp, 0);
11874
11875                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11876                 if (netif_running(dev)) {
11877                         tg3_flag_set(tp, INIT_COMPLETE);
11878                         err2 = tg3_restart_hw(tp, 1);
11879                         if (!err2)
11880                                 tg3_netif_start(tp);
11881                 }
11882
11883                 tg3_full_unlock(tp);
11884
11885                 if (irq_sync && !err2)
11886                         tg3_phy_start(tp);
11887         }
11888         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11889                 tg3_power_down(tp);
11890
11891 }
11892
11893 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11894 {
11895         struct mii_ioctl_data *data = if_mii(ifr);
11896         struct tg3 *tp = netdev_priv(dev);
11897         int err;
11898
11899         if (tg3_flag(tp, USE_PHYLIB)) {
11900                 struct phy_device *phydev;
11901                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11902                         return -EAGAIN;
11903                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11904                 return phy_mii_ioctl(phydev, ifr, cmd);
11905         }
11906
11907         switch (cmd) {
11908         case SIOCGMIIPHY:
11909                 data->phy_id = tp->phy_addr;
11910
11911                 /* fallthru */
11912         case SIOCGMIIREG: {
11913                 u32 mii_regval;
11914
11915                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11916                         break;                  /* We have no PHY */
11917
11918                 if (!netif_running(dev))
11919                         return -EAGAIN;
11920
11921                 spin_lock_bh(&tp->lock);
11922                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11923                 spin_unlock_bh(&tp->lock);
11924
11925                 data->val_out = mii_regval;
11926
11927                 return err;
11928         }
11929
11930         case SIOCSMIIREG:
11931                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11932                         break;                  /* We have no PHY */
11933
11934                 if (!netif_running(dev))
11935                         return -EAGAIN;
11936
11937                 spin_lock_bh(&tp->lock);
11938                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11939                 spin_unlock_bh(&tp->lock);
11940
11941                 return err;
11942
11943         default:
11944                 /* do nothing */
11945                 break;
11946         }
11947         return -EOPNOTSUPP;
11948 }
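/* Rough userspace sketch of the MII ioctl path handled above (the
 * interface name and register choice are illustrative):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr = {};
 *	struct mii_ioctl_data *mii =
 *		(struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;	// register 1, basic mode status
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// result lands in mii->val_out
 */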
11949
11950 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11951 {
11952         struct tg3 *tp = netdev_priv(dev);
11953
11954         memcpy(ec, &tp->coal, sizeof(*ec));
11955         return 0;
11956 }
11957
11958 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11959 {
11960         struct tg3 *tp = netdev_priv(dev);
11961         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11962         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11963
11964         if (!tg3_flag(tp, 5705_PLUS)) {
11965                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11966                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11967                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11968                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11969         }
11970
11971         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11972             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11973             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11974             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11975             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11976             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11977             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11978             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11979             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11980             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11981                 return -EINVAL;
11982
11983         /* No rx interrupts will be generated if both are zero */
11984         if ((ec->rx_coalesce_usecs == 0) &&
11985             (ec->rx_max_coalesced_frames == 0))
11986                 return -EINVAL;
11987
11988         /* No tx interrupts will be generated if both are zero */
11989         if ((ec->tx_coalesce_usecs == 0) &&
11990             (ec->tx_max_coalesced_frames == 0))
11991                 return -EINVAL;
11992
11993         /* Only copy relevant parameters, ignore all others. */
11994         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11995         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11996         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11997         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11998         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11999         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12000         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12001         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12002         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12003
12004         if (netif_running(dev)) {
12005                 tg3_full_lock(tp, 0);
12006                 __tg3_set_coalesce(tp, &tp->coal);
12007                 tg3_full_unlock(tp);
12008         }
12009         return 0;
12010 }
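/* These fields correspond one-to-one with ethtool's coalescing knobs;
 * e.g. "ethtool -C eth0 rx-usecs 20 rx-frames 5" sets
 * rx_coalesce_usecs and rx_max_coalesced_frames above.  At least one
 * of each rx/tx pair must stay nonzero, per the -EINVAL checks.
 */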
12011
12012 static const struct ethtool_ops tg3_ethtool_ops = {
12013         .get_settings           = tg3_get_settings,
12014         .set_settings           = tg3_set_settings,
12015         .get_drvinfo            = tg3_get_drvinfo,
12016         .get_regs_len           = tg3_get_regs_len,
12017         .get_regs               = tg3_get_regs,
12018         .get_wol                = tg3_get_wol,
12019         .set_wol                = tg3_set_wol,
12020         .get_msglevel           = tg3_get_msglevel,
12021         .set_msglevel           = tg3_set_msglevel,
12022         .nway_reset             = tg3_nway_reset,
12023         .get_link               = ethtool_op_get_link,
12024         .get_eeprom_len         = tg3_get_eeprom_len,
12025         .get_eeprom             = tg3_get_eeprom,
12026         .set_eeprom             = tg3_set_eeprom,
12027         .get_ringparam          = tg3_get_ringparam,
12028         .set_ringparam          = tg3_set_ringparam,
12029         .get_pauseparam         = tg3_get_pauseparam,
12030         .set_pauseparam         = tg3_set_pauseparam,
12031         .self_test              = tg3_self_test,
12032         .get_strings            = tg3_get_strings,
12033         .set_phys_id            = tg3_set_phys_id,
12034         .get_ethtool_stats      = tg3_get_ethtool_stats,
12035         .get_coalesce           = tg3_get_coalesce,
12036         .set_coalesce           = tg3_set_coalesce,
12037         .get_sset_count         = tg3_get_sset_count,
12038 };
12039
12040 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12041 {
12042         u32 cursize, val, magic;
12043
12044         tp->nvram_size = EEPROM_CHIP_SIZE;
12045
12046         if (tg3_nvram_read(tp, 0, &magic) != 0)
12047                 return;
12048
12049         if ((magic != TG3_EEPROM_MAGIC) &&
12050             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12051             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12052                 return;
12053
12054         /*
12055          * Size the chip by reading offsets at increasing powers of two.
12056          * When we encounter our validation signature, we know the addressing
12057          * has wrapped around, and thus have our chip size.
12058          */
12059         cursize = 0x10;
12060
12061         while (cursize < tp->nvram_size) {
12062                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12063                         return;
12064
12065                 if (val == magic)
12066                         break;
12067
12068                 cursize <<= 1;
12069         }
12070
12071         tp->nvram_size = cursize;
12072 }
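/* Worked example: with a 512-byte EEPROM, the reads at offsets 0x10,
 * 0x20, ..., 0x100 return ordinary data, but the read at 0x200 wraps
 * around to offset 0 and returns the magic signature again, so
 * nvram_size ends up as 0x200 bytes.
 */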
12073
12074 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12075 {
12076         u32 val;
12077
12078         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12079                 return;
12080
12081         /* Selfboot format */
12082         if (val != TG3_EEPROM_MAGIC) {
12083                 tg3_get_eeprom_size(tp);
12084                 return;
12085         }
12086
12087         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12088                 if (val != 0) {
12089                         /* Subtle: we want the 16-bit size (in KB) at
12090                          * offset 0xf2.  tg3_nvram_read() byteswaps the
12091                          * 32-bit word it returns according to the
12092                          * byteswapping settings used for all other
12093                          * register accesses, which guarantees the two
12094                          * bytes we want land in the lower 16 bits.
12095                          * But NVRAM itself is little-endian, so those
12096                          * bits arrive opposite the CPU's endianness;
12097                          * swab16() below flips them back to CPU byte
12098                          * order before scaling from KB to bytes.
12099                          */
12100                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12101                         return;
12102                 }
12103         }
12104         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12105 }
12106
12107 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12108 {
12109         u32 nvcfg1;
12110
12111         nvcfg1 = tr32(NVRAM_CFG1);
12112         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12113                 tg3_flag_set(tp, FLASH);
12114         } else {
12115                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12116                 tw32(NVRAM_CFG1, nvcfg1);
12117         }
12118
12119         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12120             tg3_flag(tp, 5780_CLASS)) {
12121                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12122                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12123                         tp->nvram_jedecnum = JEDEC_ATMEL;
12124                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12125                         tg3_flag_set(tp, NVRAM_BUFFERED);
12126                         break;
12127                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12128                         tp->nvram_jedecnum = JEDEC_ATMEL;
12129                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12130                         break;
12131                 case FLASH_VENDOR_ATMEL_EEPROM:
12132                         tp->nvram_jedecnum = JEDEC_ATMEL;
12133                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12134                         tg3_flag_set(tp, NVRAM_BUFFERED);
12135                         break;
12136                 case FLASH_VENDOR_ST:
12137                         tp->nvram_jedecnum = JEDEC_ST;
12138                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12139                         tg3_flag_set(tp, NVRAM_BUFFERED);
12140                         break;
12141                 case FLASH_VENDOR_SAIFUN:
12142                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12143                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12144                         break;
12145                 case FLASH_VENDOR_SST_SMALL:
12146                 case FLASH_VENDOR_SST_LARGE:
12147                         tp->nvram_jedecnum = JEDEC_SST;
12148                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12149                         break;
12150                 }
12151         } else {
12152                 tp->nvram_jedecnum = JEDEC_ATMEL;
12153                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12154                 tg3_flag_set(tp, NVRAM_BUFFERED);
12155         }
12156 }
12157
12158 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12159 {
12160         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12161         case FLASH_5752PAGE_SIZE_256:
12162                 tp->nvram_pagesize = 256;
12163                 break;
12164         case FLASH_5752PAGE_SIZE_512:
12165                 tp->nvram_pagesize = 512;
12166                 break;
12167         case FLASH_5752PAGE_SIZE_1K:
12168                 tp->nvram_pagesize = 1024;
12169                 break;
12170         case FLASH_5752PAGE_SIZE_2K:
12171                 tp->nvram_pagesize = 2048;
12172                 break;
12173         case FLASH_5752PAGE_SIZE_4K:
12174                 tp->nvram_pagesize = 4096;
12175                 break;
12176         case FLASH_5752PAGE_SIZE_264:
12177                 tp->nvram_pagesize = 264;
12178                 break;
12179         case FLASH_5752PAGE_SIZE_528:
12180                 tp->nvram_pagesize = 528;
12181                 break;
12182         }
12183 }
12184
12185 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12186 {
12187         u32 nvcfg1;
12188
12189         nvcfg1 = tr32(NVRAM_CFG1);
12190
12191         /* NVRAM protection for TPM */
12192         if (nvcfg1 & (1 << 27))
12193                 tg3_flag_set(tp, PROTECTED_NVRAM);
12194
12195         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12196         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12197         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12198                 tp->nvram_jedecnum = JEDEC_ATMEL;
12199                 tg3_flag_set(tp, NVRAM_BUFFERED);
12200                 break;
12201         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12202                 tp->nvram_jedecnum = JEDEC_ATMEL;
12203                 tg3_flag_set(tp, NVRAM_BUFFERED);
12204                 tg3_flag_set(tp, FLASH);
12205                 break;
12206         case FLASH_5752VENDOR_ST_M45PE10:
12207         case FLASH_5752VENDOR_ST_M45PE20:
12208         case FLASH_5752VENDOR_ST_M45PE40:
12209                 tp->nvram_jedecnum = JEDEC_ST;
12210                 tg3_flag_set(tp, NVRAM_BUFFERED);
12211                 tg3_flag_set(tp, FLASH);
12212                 break;
12213         }
12214
12215         if (tg3_flag(tp, FLASH)) {
12216                 tg3_nvram_get_pagesize(tp, nvcfg1);
12217         } else {
12218                 /* For eeprom, set pagesize to maximum eeprom size */
12219                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12220
12221                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12222                 tw32(NVRAM_CFG1, nvcfg1);
12223         }
12224 }
12225
12226 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12227 {
12228         u32 nvcfg1, protect = 0;
12229
12230         nvcfg1 = tr32(NVRAM_CFG1);
12231
12232         /* NVRAM protection for TPM */
12233         if (nvcfg1 & (1 << 27)) {
12234                 tg3_flag_set(tp, PROTECTED_NVRAM);
12235                 protect = 1;
12236         }
12237
12238         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12239         switch (nvcfg1) {
12240         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12241         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12242         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12243         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12244                 tp->nvram_jedecnum = JEDEC_ATMEL;
12245                 tg3_flag_set(tp, NVRAM_BUFFERED);
12246                 tg3_flag_set(tp, FLASH);
12247                 tp->nvram_pagesize = 264;
12248                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12249                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12250                         tp->nvram_size = (protect ? 0x3e200 :
12251                                           TG3_NVRAM_SIZE_512KB);
12252                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12253                         tp->nvram_size = (protect ? 0x1f200 :
12254                                           TG3_NVRAM_SIZE_256KB);
12255                 else
12256                         tp->nvram_size = (protect ? 0x1f200 :
12257                                           TG3_NVRAM_SIZE_128KB);
12258                 break;
12259         case FLASH_5752VENDOR_ST_M45PE10:
12260         case FLASH_5752VENDOR_ST_M45PE20:
12261         case FLASH_5752VENDOR_ST_M45PE40:
12262                 tp->nvram_jedecnum = JEDEC_ST;
12263                 tg3_flag_set(tp, NVRAM_BUFFERED);
12264                 tg3_flag_set(tp, FLASH);
12265                 tp->nvram_pagesize = 256;
12266                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12267                         tp->nvram_size = (protect ?
12268                                           TG3_NVRAM_SIZE_64KB :
12269                                           TG3_NVRAM_SIZE_128KB);
12270                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12271                         tp->nvram_size = (protect ?
12272                                           TG3_NVRAM_SIZE_64KB :
12273                                           TG3_NVRAM_SIZE_256KB);
12274                 else
12275                         tp->nvram_size = (protect ?
12276                                           TG3_NVRAM_SIZE_128KB :
12277                                           TG3_NVRAM_SIZE_512KB);
12278                 break;
12279         }
12280 }
12281
12282 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12283 {
12284         u32 nvcfg1;
12285
12286         nvcfg1 = tr32(NVRAM_CFG1);
12287
12288         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12289         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12290         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12291         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12292         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12293                 tp->nvram_jedecnum = JEDEC_ATMEL;
12294                 tg3_flag_set(tp, NVRAM_BUFFERED);
12295                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12296
12297                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12298                 tw32(NVRAM_CFG1, nvcfg1);
12299                 break;
12300         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12301         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12302         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12303         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12304                 tp->nvram_jedecnum = JEDEC_ATMEL;
12305                 tg3_flag_set(tp, NVRAM_BUFFERED);
12306                 tg3_flag_set(tp, FLASH);
12307                 tp->nvram_pagesize = 264;
12308                 break;
12309         case FLASH_5752VENDOR_ST_M45PE10:
12310         case FLASH_5752VENDOR_ST_M45PE20:
12311         case FLASH_5752VENDOR_ST_M45PE40:
12312                 tp->nvram_jedecnum = JEDEC_ST;
12313                 tg3_flag_set(tp, NVRAM_BUFFERED);
12314                 tg3_flag_set(tp, FLASH);
12315                 tp->nvram_pagesize = 256;
12316                 break;
12317         }
12318 }
12319
12320 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12321 {
12322         u32 nvcfg1, protect = 0;
12323
12324         nvcfg1 = tr32(NVRAM_CFG1);
12325
12326         /* NVRAM protection for TPM */
12327         if (nvcfg1 & (1 << 27)) {
12328                 tg3_flag_set(tp, PROTECTED_NVRAM);
12329                 protect = 1;
12330         }
12331
12332         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12333         switch (nvcfg1) {
12334         case FLASH_5761VENDOR_ATMEL_ADB021D:
12335         case FLASH_5761VENDOR_ATMEL_ADB041D:
12336         case FLASH_5761VENDOR_ATMEL_ADB081D:
12337         case FLASH_5761VENDOR_ATMEL_ADB161D:
12338         case FLASH_5761VENDOR_ATMEL_MDB021D:
12339         case FLASH_5761VENDOR_ATMEL_MDB041D:
12340         case FLASH_5761VENDOR_ATMEL_MDB081D:
12341         case FLASH_5761VENDOR_ATMEL_MDB161D:
12342                 tp->nvram_jedecnum = JEDEC_ATMEL;
12343                 tg3_flag_set(tp, NVRAM_BUFFERED);
12344                 tg3_flag_set(tp, FLASH);
12345                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12346                 tp->nvram_pagesize = 256;
12347                 break;
12348         case FLASH_5761VENDOR_ST_A_M45PE20:
12349         case FLASH_5761VENDOR_ST_A_M45PE40:
12350         case FLASH_5761VENDOR_ST_A_M45PE80:
12351         case FLASH_5761VENDOR_ST_A_M45PE16:
12352         case FLASH_5761VENDOR_ST_M_M45PE20:
12353         case FLASH_5761VENDOR_ST_M_M45PE40:
12354         case FLASH_5761VENDOR_ST_M_M45PE80:
12355         case FLASH_5761VENDOR_ST_M_M45PE16:
12356                 tp->nvram_jedecnum = JEDEC_ST;
12357                 tg3_flag_set(tp, NVRAM_BUFFERED);
12358                 tg3_flag_set(tp, FLASH);
12359                 tp->nvram_pagesize = 256;
12360                 break;
12361         }
12362
12363         if (protect) {
12364                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12365         } else {
12366                 switch (nvcfg1) {
12367                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12368                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12369                 case FLASH_5761VENDOR_ST_A_M45PE16:
12370                 case FLASH_5761VENDOR_ST_M_M45PE16:
12371                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12372                         break;
12373                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12374                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12375                 case FLASH_5761VENDOR_ST_A_M45PE80:
12376                 case FLASH_5761VENDOR_ST_M_M45PE80:
12377                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12378                         break;
12379                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12380                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12381                 case FLASH_5761VENDOR_ST_A_M45PE40:
12382                 case FLASH_5761VENDOR_ST_M_M45PE40:
12383                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12384                         break;
12385                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12386                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12387                 case FLASH_5761VENDOR_ST_A_M45PE20:
12388                 case FLASH_5761VENDOR_ST_M_M45PE20:
12389                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12390                         break;
12391                 }
12392         }
12393 }
12394
12395 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12396 {
12397         tp->nvram_jedecnum = JEDEC_ATMEL;
12398         tg3_flag_set(tp, NVRAM_BUFFERED);
12399         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12400 }
12401
12402 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12403 {
12404         u32 nvcfg1;
12405
12406         nvcfg1 = tr32(NVRAM_CFG1);
12407
12408         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12409         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12410         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12411                 tp->nvram_jedecnum = JEDEC_ATMEL;
12412                 tg3_flag_set(tp, NVRAM_BUFFERED);
12413                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12414
12415                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12416                 tw32(NVRAM_CFG1, nvcfg1);
12417                 return;
12418         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12419         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12420         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12421         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12422         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12423         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12424         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12425                 tp->nvram_jedecnum = JEDEC_ATMEL;
12426                 tg3_flag_set(tp, NVRAM_BUFFERED);
12427                 tg3_flag_set(tp, FLASH);
12428
12429                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12430                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12431                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12432                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12433                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12434                         break;
12435                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12436                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12437                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12438                         break;
12439                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12440                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12441                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12442                         break;
12443                 }
12444                 break;
12445         case FLASH_5752VENDOR_ST_M45PE10:
12446         case FLASH_5752VENDOR_ST_M45PE20:
12447         case FLASH_5752VENDOR_ST_M45PE40:
12448                 tp->nvram_jedecnum = JEDEC_ST;
12449                 tg3_flag_set(tp, NVRAM_BUFFERED);
12450                 tg3_flag_set(tp, FLASH);
12451
12452                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12453                 case FLASH_5752VENDOR_ST_M45PE10:
12454                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12455                         break;
12456                 case FLASH_5752VENDOR_ST_M45PE20:
12457                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12458                         break;
12459                 case FLASH_5752VENDOR_ST_M45PE40:
12460                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12461                         break;
12462                 }
12463                 break;
12464         default:
12465                 tg3_flag_set(tp, NO_NVRAM);
12466                 return;
12467         }
12468
12469         tg3_nvram_get_pagesize(tp, nvcfg1);
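        /* Only the 264- and 528-byte page geometries use NVRAM address
         * translation; for any other page size, bypass it.
         */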
12470         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12471                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12472 }
12473
12475 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12476 {
12477         u32 nvcfg1;
12478
12479         nvcfg1 = tr32(NVRAM_CFG1);
12480
12481         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12482         case FLASH_5717VENDOR_ATMEL_EEPROM:
12483         case FLASH_5717VENDOR_MICRO_EEPROM:
12484                 tp->nvram_jedecnum = JEDEC_ATMEL;
12485                 tg3_flag_set(tp, NVRAM_BUFFERED);
12486                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12487
12488                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12489                 tw32(NVRAM_CFG1, nvcfg1);
12490                 return;
12491         case FLASH_5717VENDOR_ATMEL_MDB011D:
12492         case FLASH_5717VENDOR_ATMEL_ADB011B:
12493         case FLASH_5717VENDOR_ATMEL_ADB011D:
12494         case FLASH_5717VENDOR_ATMEL_MDB021D:
12495         case FLASH_5717VENDOR_ATMEL_ADB021B:
12496         case FLASH_5717VENDOR_ATMEL_ADB021D:
12497         case FLASH_5717VENDOR_ATMEL_45USPT:
12498                 tp->nvram_jedecnum = JEDEC_ATMEL;
12499                 tg3_flag_set(tp, NVRAM_BUFFERED);
12500                 tg3_flag_set(tp, FLASH);
12501
12502                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12503                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12504                         /* Detect size with tg3_nvram_get_size() */
12505                         break;
12506                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12507                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12508                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12509                         break;
12510                 default:
12511                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12512                         break;
12513                 }
12514                 break;
12515         case FLASH_5717VENDOR_ST_M_M25PE10:
12516         case FLASH_5717VENDOR_ST_A_M25PE10:
12517         case FLASH_5717VENDOR_ST_M_M45PE10:
12518         case FLASH_5717VENDOR_ST_A_M45PE10:
12519         case FLASH_5717VENDOR_ST_M_M25PE20:
12520         case FLASH_5717VENDOR_ST_A_M25PE20:
12521         case FLASH_5717VENDOR_ST_M_M45PE20:
12522         case FLASH_5717VENDOR_ST_A_M45PE20:
12523         case FLASH_5717VENDOR_ST_25USPT:
12524         case FLASH_5717VENDOR_ST_45USPT:
12525                 tp->nvram_jedecnum = JEDEC_ST;
12526                 tg3_flag_set(tp, NVRAM_BUFFERED);
12527                 tg3_flag_set(tp, FLASH);
12528
12529                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12530                 case FLASH_5717VENDOR_ST_M_M25PE20:
12531                 case FLASH_5717VENDOR_ST_M_M45PE20:
12532                         /* Detect size with tg3_nvram_get_size() */
12533                         break;
12534                 case FLASH_5717VENDOR_ST_A_M25PE20:
12535                 case FLASH_5717VENDOR_ST_A_M45PE20:
12536                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12537                         break;
12538                 default:
12539                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12540                         break;
12541                 }
12542                 break;
12543         default:
12544                 tg3_flag_set(tp, NO_NVRAM);
12545                 return;
12546         }
12547
12548         tg3_nvram_get_pagesize(tp, nvcfg1);
12549         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12550                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12551 }
12552
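/* The 5720 reports the NVRAM pin straps through NVRAM_CFG1; decode
 * them to identify the attached EEPROM or flash part.
 */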
12553 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12554 {
12555         u32 nvcfg1, nvmpinstrp;
12556
12557         nvcfg1 = tr32(NVRAM_CFG1);
12558         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12559
12560         switch (nvmpinstrp) {
12561         case FLASH_5720_EEPROM_HD:
12562         case FLASH_5720_EEPROM_LD:
12563                 tp->nvram_jedecnum = JEDEC_ATMEL;
12564                 tg3_flag_set(tp, NVRAM_BUFFERED);
12565
12566                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12567                 tw32(NVRAM_CFG1, nvcfg1);
12568                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12569                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12570                 else
12571                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12572                 return;
12573         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12574         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12575         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12576         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12577         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12578         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12579         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12580         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12581         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12582         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12583         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12584         case FLASH_5720VENDOR_ATMEL_45USPT:
12585                 tp->nvram_jedecnum = JEDEC_ATMEL;
12586                 tg3_flag_set(tp, NVRAM_BUFFERED);
12587                 tg3_flag_set(tp, FLASH);
12588
12589                 switch (nvmpinstrp) {
12590                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12591                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12592                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12593                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12594                         break;
12595                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12596                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12597                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12598                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12599                         break;
12600                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12601                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12602                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12603                         break;
12604                 default:
12605                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12606                         break;
12607                 }
12608                 break;
12609         case FLASH_5720VENDOR_M_ST_M25PE10:
12610         case FLASH_5720VENDOR_M_ST_M45PE10:
12611         case FLASH_5720VENDOR_A_ST_M25PE10:
12612         case FLASH_5720VENDOR_A_ST_M45PE10:
12613         case FLASH_5720VENDOR_M_ST_M25PE20:
12614         case FLASH_5720VENDOR_M_ST_M45PE20:
12615         case FLASH_5720VENDOR_A_ST_M25PE20:
12616         case FLASH_5720VENDOR_A_ST_M45PE20:
12617         case FLASH_5720VENDOR_M_ST_M25PE40:
12618         case FLASH_5720VENDOR_M_ST_M45PE40:
12619         case FLASH_5720VENDOR_A_ST_M25PE40:
12620         case FLASH_5720VENDOR_A_ST_M45PE40:
12621         case FLASH_5720VENDOR_M_ST_M25PE80:
12622         case FLASH_5720VENDOR_M_ST_M45PE80:
12623         case FLASH_5720VENDOR_A_ST_M25PE80:
12624         case FLASH_5720VENDOR_A_ST_M45PE80:
12625         case FLASH_5720VENDOR_ST_25USPT:
12626         case FLASH_5720VENDOR_ST_45USPT:
12627                 tp->nvram_jedecnum = JEDEC_ST;
12628                 tg3_flag_set(tp, NVRAM_BUFFERED);
12629                 tg3_flag_set(tp, FLASH);
12630
12631                 switch (nvmpinstrp) {
12632                 case FLASH_5720VENDOR_M_ST_M25PE20:
12633                 case FLASH_5720VENDOR_M_ST_M45PE20:
12634                 case FLASH_5720VENDOR_A_ST_M25PE20:
12635                 case FLASH_5720VENDOR_A_ST_M45PE20:
12636                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12637                         break;
12638                 case FLASH_5720VENDOR_M_ST_M25PE40:
12639                 case FLASH_5720VENDOR_M_ST_M45PE40:
12640                 case FLASH_5720VENDOR_A_ST_M25PE40:
12641                 case FLASH_5720VENDOR_A_ST_M45PE40:
12642                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12643                         break;
12644                 case FLASH_5720VENDOR_M_ST_M25PE80:
12645                 case FLASH_5720VENDOR_M_ST_M45PE80:
12646                 case FLASH_5720VENDOR_A_ST_M25PE80:
12647                 case FLASH_5720VENDOR_A_ST_M45PE80:
12648                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12649                         break;
12650                 default:
12651                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12652                         break;
12653                 }
12654                 break;
12655         default:
12656                 tg3_flag_set(tp, NO_NVRAM);
12657                 return;
12658         }
12659
12660         tg3_nvram_get_pagesize(tp, nvcfg1);
12661         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12662                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12663 }
12664
12665 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12666 static void __devinit tg3_nvram_init(struct tg3 *tp)
12667 {
12668         tw32_f(GRC_EEPROM_ADDR,
12669              (EEPROM_ADDR_FSM_RESET |
12670               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12671                EEPROM_ADDR_CLKPERD_SHIFT)));
12672
12673         msleep(1);
12674
12675         /* Enable seeprom accesses. */
12676         tw32_f(GRC_LOCAL_CTRL,
12677              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12678         udelay(100);
12679
12680         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12681             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12682                 tg3_flag_set(tp, NVRAM);
12683
12684                 if (tg3_nvram_lock(tp)) {
12685                         netdev_warn(tp->dev,
12686                                     "Cannot get nvram lock, %s failed\n",
12687                                     __func__);
12688                         return;
12689                 }
12690                 tg3_enable_nvram_access(tp);
12691
12692                 tp->nvram_size = 0;
12693
12694                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12695                         tg3_get_5752_nvram_info(tp);
12696                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12697                         tg3_get_5755_nvram_info(tp);
12698                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12699                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12700                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12701                         tg3_get_5787_nvram_info(tp);
12702                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12703                         tg3_get_5761_nvram_info(tp);
12704                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12705                         tg3_get_5906_nvram_info(tp);
12706                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12707                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12708                         tg3_get_57780_nvram_info(tp);
12709                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12710                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12711                         tg3_get_5717_nvram_info(tp);
12712                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12713                         tg3_get_5720_nvram_info(tp);
12714                 else
12715                         tg3_get_nvram_info(tp);
12716
12717                 if (tp->nvram_size == 0)
12718                         tg3_get_nvram_size(tp);
12719
12720                 tg3_disable_nvram_access(tp);
12721                 tg3_nvram_unlock(tp);
12722
12723         } else {
12724                 tg3_flag_clear(tp, NVRAM);
12725                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12726
12727                 tg3_get_eeprom_size(tp);
12728         }
12729 }
12730
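/* Write a block to a serial EEPROM one 32-bit word at a time through
 * GRC_EEPROM_DATA/GRC_EEPROM_ADDR, polling EEPROM_ADDR_COMPLETE after
 * each word.
 */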
12731 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12732                                     u32 offset, u32 len, u8 *buf)
12733 {
12734         int i, j, rc = 0;
12735         u32 val;
12736
12737         for (i = 0; i < len; i += 4) {
12738                 u32 addr;
12739                 __be32 data;
12740
12741                 addr = offset + i;
12742
12743                 memcpy(&data, buf + i, 4);
12744
12745                 /*
12746                  * The SEEPROM interface expects the data to always be opposite
12747                  * the native endian format.  We accomplish this by reversing
12748                  * all the operations that would have been performed on the
12749                  * data from a call to tg3_nvram_read_be32().
12750                  */
12751                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12752
12753                 val = tr32(GRC_EEPROM_ADDR);
12754                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12755
12756                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12757                         EEPROM_ADDR_READ);
12758                 tw32(GRC_EEPROM_ADDR, val |
12759                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12760                         (addr & EEPROM_ADDR_ADDR_MASK) |
12761                         EEPROM_ADDR_START |
12762                         EEPROM_ADDR_WRITE);
12763
12764                 for (j = 0; j < 1000; j++) {
12765                         val = tr32(GRC_EEPROM_ADDR);
12766
12767                         if (val & EEPROM_ADDR_COMPLETE)
12768                                 break;
12769                         msleep(1);
12770                 }
12771                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12772                         rc = -EBUSY;
12773                         break;
12774                 }
12775         }
12776
12777         return rc;
12778 }
12779
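/* Unbuffered flash cannot be written in place: read back the whole
 * page, merge in the new data, erase the page, then reprogram it word
 * by word.
 */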
12780 /* offset and length are dword aligned */
12781 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12782                 u8 *buf)
12783 {
12784         int ret = 0;
12785         u32 pagesize = tp->nvram_pagesize;
12786         u32 pagemask = pagesize - 1;
12787         u32 nvram_cmd;
12788         u8 *tmp;
12789
12790         tmp = kmalloc(pagesize, GFP_KERNEL);
12791         if (tmp == NULL)
12792                 return -ENOMEM;
12793
12794         while (len) {
12795                 int j;
12796                 u32 phy_addr, page_off, size;
12797
12798                 phy_addr = offset & ~pagemask;
12799
12800                 for (j = 0; j < pagesize; j += 4) {
12801                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12802                                                   (__be32 *) (tmp + j));
12803                         if (ret)
12804                                 break;
12805                 }
12806                 if (ret)
12807                         break;
12808
12809                 page_off = offset & pagemask;
12810                 size = pagesize;
12811                 if (len < size)
12812                         size = len;
12813
12814                 len -= size;
12815
12816                 memcpy(tmp + page_off, buf, size);
12817
12818                 offset = offset + (pagesize - page_off);
12819
12820                 tg3_enable_nvram_access(tp);
12821
12822                 /*
12823                  * Before we can erase the flash page, we need
12824                  * to issue a special "write enable" command.
12825                  */
12826                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12827
12828                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12829                         break;
12830
12831                 /* Erase the target page */
12832                 tw32(NVRAM_ADDR, phy_addr);
12833
12834                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12835                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12836
12837                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12838                         break;
12839
12840                 /* Issue another write enable to start the write. */
12841                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12842
12843                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12844                         break;
12845
12846                 for (j = 0; j < pagesize; j += 4) {
12847                         __be32 data;
12848
12849                         data = *((__be32 *) (tmp + j));
12850
12851                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12852
12853                         tw32(NVRAM_ADDR, phy_addr + j);
12854
12855                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12856                                 NVRAM_CMD_WR;
12857
12858                         if (j == 0)
12859                                 nvram_cmd |= NVRAM_CMD_FIRST;
12860                         else if (j == (pagesize - 4))
12861                                 nvram_cmd |= NVRAM_CMD_LAST;
12862
12863                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12864                                 break;
12865                 }
12866                 if (ret)
12867                         break;
12868         }
12869
12870         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12871         tg3_nvram_exec_cmd(tp, nvram_cmd);
12872
12873         kfree(tmp);
12874
12875         return ret;
12876 }
12877
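/* Buffered flash and EEPROM parts accept direct word writes; tag the
 * first and last words of each page (and of the whole transfer) so the
 * controller sees the command boundaries.
 */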
12878 /* offset and length are dword aligned */
12879 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12880                 u8 *buf)
12881 {
12882         int i, ret = 0;
12883
12884         for (i = 0; i < len; i += 4, offset += 4) {
12885                 u32 page_off, phy_addr, nvram_cmd;
12886                 __be32 data;
12887
12888                 memcpy(&data, buf + i, 4);
12889                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12890
12891                 page_off = offset % tp->nvram_pagesize;
12892
12893                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12894
12895                 tw32(NVRAM_ADDR, phy_addr);
12896
12897                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12898
12899                 if (page_off == 0 || i == 0)
12900                         nvram_cmd |= NVRAM_CMD_FIRST;
12901                 if (page_off == (tp->nvram_pagesize - 4))
12902                         nvram_cmd |= NVRAM_CMD_LAST;
12903
12904                 if (i == (len - 4))
12905                         nvram_cmd |= NVRAM_CMD_LAST;
12906
12907                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12908                     !tg3_flag(tp, 5755_PLUS) &&
12909                     (tp->nvram_jedecnum == JEDEC_ST) &&
12910                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12911
12912                         if ((ret = tg3_nvram_exec_cmd(tp,
12913                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12914                                 NVRAM_CMD_DONE)))
12915
12916                                 break;
12917                 }
12918                 if (!tg3_flag(tp, FLASH)) {
12919                         /* We always do complete word writes to eeprom. */
12920                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12921                 }
12922
12923                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12924                         break;
12925         }
12926         return ret;
12927 }
12928
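/* Top-level NVRAM write entry point: drops write protection if
 * necessary, takes the NVRAM lock, and dispatches to the buffered,
 * unbuffered, or legacy-EEPROM write path.
 */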
12929 /* offset and length are dword aligned */
12930 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12931 {
12932         int ret;
12933
12934         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12935                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12936                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12937                 udelay(40);
12938         }
12939
12940         if (!tg3_flag(tp, NVRAM)) {
12941                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12942         } else {
12943                 u32 grc_mode;
12944
12945                 ret = tg3_nvram_lock(tp);
12946                 if (ret)
12947                         return ret;
12948
12949                 tg3_enable_nvram_access(tp);
12950                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12951                         tw32(NVRAM_WRITE1, 0x406);
12952
12953                 grc_mode = tr32(GRC_MODE);
12954                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12955
12956                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12957                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12958                                 buf);
12959                 } else {
12960                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12961                                 buf);
12962                 }
12963
12964                 grc_mode = tr32(GRC_MODE);
12965                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12966
12967                 tg3_disable_nvram_access(tp);
12968                 tg3_nvram_unlock(tp);
12969         }
12970
12971         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12972                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12973                 udelay(40);
12974         }
12975
12976         return ret;
12977 }
12978
12979 struct subsys_tbl_ent {
12980         u16 subsys_vendor, subsys_devid;
12981         u32 phy_id;
12982 };
12983
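/* Map PCI subsystem IDs to PHY IDs for boards whose NVRAM carries no
 * usable PHY information; consulted as a last resort by tg3_phy_probe().
 */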
12984 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12985         /* Broadcom boards. */
12986         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12987           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12988         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12989           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12990         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12991           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12992         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12993           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12994         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12995           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12996         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12997           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12998         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12999           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13000         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13001           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13002         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13003           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13004         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13005           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13006         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13007           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13008
13009         /* 3com boards. */
13010         { TG3PCI_SUBVENDOR_ID_3COM,
13011           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13012         { TG3PCI_SUBVENDOR_ID_3COM,
13013           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13014         { TG3PCI_SUBVENDOR_ID_3COM,
13015           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13016         { TG3PCI_SUBVENDOR_ID_3COM,
13017           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13018         { TG3PCI_SUBVENDOR_ID_3COM,
13019           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13020
13021         /* DELL boards. */
13022         { TG3PCI_SUBVENDOR_ID_DELL,
13023           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13024         { TG3PCI_SUBVENDOR_ID_DELL,
13025           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13026         { TG3PCI_SUBVENDOR_ID_DELL,
13027           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13028         { TG3PCI_SUBVENDOR_ID_DELL,
13029           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13030
13031         /* Compaq boards. */
13032         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13033           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13034         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13035           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13036         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13037           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13038         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13039           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13040         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13041           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13042
13043         /* IBM boards. */
13044         { TG3PCI_SUBVENDOR_ID_IBM,
13045           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13046 };
13047
13048 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13049 {
13050         int i;
13051
13052         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13053                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13054                      tp->pdev->subsystem_vendor) &&
13055                     (subsys_id_to_phy_id[i].subsys_devid ==
13056                      tp->pdev->subsystem_device))
13057                         return &subsys_id_to_phy_id[i];
13058         }
13059         return NULL;
13060 }
13061
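/* Pull the hardware configuration that bootcode left in NIC SRAM:
 * PHY id and type, LED mode, and the WOL/ASF/APE capability flags.
 */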
13062 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13063 {
13064         u32 val;
13065
13066         tp->phy_id = TG3_PHY_ID_INVALID;
13067         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13068
13069         /* Assume the device is onboard and WOL-capable by default.  */
13070         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13071         tg3_flag_set(tp, WOL_CAP);
13072
13073         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13074                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13075                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13076                         tg3_flag_set(tp, IS_NIC);
13077                 }
13078                 val = tr32(VCPU_CFGSHDW);
13079                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13080                         tg3_flag_set(tp, ASPM_WORKAROUND);
13081                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13082                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13083                         tg3_flag_set(tp, WOL_ENABLE);
13084                         device_set_wakeup_enable(&tp->pdev->dev, true);
13085                 }
13086                 goto done;
13087         }
13088
13089         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13090         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13091                 u32 nic_cfg, led_cfg;
13092                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13093                 int eeprom_phy_serdes = 0;
13094
13095                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13096                 tp->nic_sram_data_cfg = nic_cfg;
13097
13098                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13099                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13100                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13101                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13102                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13103                     (ver > 0) && (ver < 0x100))
13104                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13105
13106                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13107                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13108
13109                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13110                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13111                         eeprom_phy_serdes = 1;
13112
13113                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13114                 if (nic_phy_id != 0) {
13115                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13116                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13117
13118                         eeprom_phy_id  = (id1 >> 16) << 10;
13119                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13120                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13121                 } else
13122                         eeprom_phy_id = 0;
13123
13124                 tp->phy_id = eeprom_phy_id;
13125                 if (eeprom_phy_serdes) {
13126                         if (!tg3_flag(tp, 5705_PLUS))
13127                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13128                         else
13129                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13130                 }
13131
13132                 if (tg3_flag(tp, 5750_PLUS))
13133                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13134                                     SHASTA_EXT_LED_MODE_MASK);
13135                 else
13136                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13137
13138                 switch (led_cfg) {
13139                 default:
13140                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13141                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13142                         break;
13143
13144                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13145                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13146                         break;
13147
13148                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13149                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13150
13151                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13152                          * read, as with some older 5700/5701 bootcode.
13153                          */
13154                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13155                             ASIC_REV_5700 ||
13156                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13157                             ASIC_REV_5701)
13158                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13159
13160                         break;
13161
13162                 case SHASTA_EXT_LED_SHARED:
13163                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13164                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13165                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13166                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13167                                                  LED_CTRL_MODE_PHY_2);
13168                         break;
13169
13170                 case SHASTA_EXT_LED_MAC:
13171                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13172                         break;
13173
13174                 case SHASTA_EXT_LED_COMBO:
13175                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13176                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13177                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13178                                                  LED_CTRL_MODE_PHY_2);
13179                         break;
13180
13181                 }
13182
13183                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13184                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13185                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13186                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13187
13188                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13189                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13190
13191                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13192                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13193                         if ((tp->pdev->subsystem_vendor ==
13194                              PCI_VENDOR_ID_ARIMA) &&
13195                             (tp->pdev->subsystem_device == 0x205a ||
13196                              tp->pdev->subsystem_device == 0x2063))
13197                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13198                 } else {
13199                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13200                         tg3_flag_set(tp, IS_NIC);
13201                 }
13202
13203                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13204                         tg3_flag_set(tp, ENABLE_ASF);
13205                         if (tg3_flag(tp, 5750_PLUS))
13206                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13207                 }
13208
13209                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13210                     tg3_flag(tp, 5750_PLUS))
13211                         tg3_flag_set(tp, ENABLE_APE);
13212
13213                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13214                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13215                         tg3_flag_clear(tp, WOL_CAP);
13216
13217                 if (tg3_flag(tp, WOL_CAP) &&
13218                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13219                         tg3_flag_set(tp, WOL_ENABLE);
13220                         device_set_wakeup_enable(&tp->pdev->dev, true);
13221                 }
13222
13223                 if (cfg2 & (1 << 17))
13224                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13225
13226                 /* Serdes signal pre-emphasis in register 0x590 is set by
13227                  * the bootcode if bit 18 is set. */
13228                 if (cfg2 & (1 << 18))
13229                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13230
13231                 if ((tg3_flag(tp, 57765_PLUS) ||
13232                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13233                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13234                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13235                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13236
13237                 if (tg3_flag(tp, PCI_EXPRESS) &&
13238                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13239                     !tg3_flag(tp, 57765_PLUS)) {
13240                         u32 cfg3;
13241
13242                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13243                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13244                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13245                 }
13246
13247                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13248                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13249                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13250                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13251                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13252                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13253         }
13254 done:
13255         if (tg3_flag(tp, WOL_CAP))
13256                 device_set_wakeup_enable(&tp->pdev->dev,
13257                                          tg3_flag(tp, WOL_ENABLE));
13258         else
13259                 device_set_wakeup_capable(&tp->pdev->dev, false);
13260 }
13261
13262 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13263 {
13264         int i;
13265         u32 val;
13266
13267         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13268         tw32(OTP_CTRL, cmd);
13269
13270         /* Wait for up to 1 ms for command to execute. */
13271         for (i = 0; i < 100; i++) {
13272                 val = tr32(OTP_STATUS);
13273                 if (val & OTP_STATUS_CMD_DONE)
13274                         break;
13275                 udelay(10);
13276         }
13277
13278         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13279 }
13280
13281 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13282  * configuration is a 32-bit value that straddles the alignment boundary.
13283  * We do two 32-bit reads and then shift and merge the results.
13284  */
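/* For example (illustrative values only): if the first read returns
 * thalf_otp = 0xAAAABBBB and the second bhalf_otp = 0xCCCCDDDD, the
 * merge below yields 0xBBBBCCCC.
 */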
13285 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13286 {
13287         u32 bhalf_otp, thalf_otp;
13288
13289         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13290
13291         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13292                 return 0;
13293
13294         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13295
13296         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13297                 return 0;
13298
13299         thalf_otp = tr32(OTP_READ_DATA);
13300
13301         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13302
13303         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13304                 return 0;
13305
13306         bhalf_otp = tr32(OTP_READ_DATA);
13307
13308         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13309 }
13310
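/* Start from the full autoneg advertisement and trim it according to
 * the PHY capability flags (10/100-only, serdes vs. copper).
 */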
13311 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13312 {
13313         u32 adv = ADVERTISED_Autoneg |
13314                   ADVERTISED_Pause;
13315
13316         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13317                 adv |= ADVERTISED_1000baseT_Half |
13318                        ADVERTISED_1000baseT_Full;
13319
13320         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13321                 adv |= ADVERTISED_100baseT_Half |
13322                        ADVERTISED_100baseT_Full |
13323                        ADVERTISED_10baseT_Half |
13324                        ADVERTISED_10baseT_Full |
13325                        ADVERTISED_TP;
13326         else
13327                 adv |= ADVERTISED_FIBRE;
13328
13329         tp->link_config.advertising = adv;
13330         tp->link_config.speed = SPEED_INVALID;
13331         tp->link_config.duplex = DUPLEX_INVALID;
13332         tp->link_config.autoneg = AUTONEG_ENABLE;
13333         tp->link_config.active_speed = SPEED_INVALID;
13334         tp->link_config.active_duplex = DUPLEX_INVALID;
13335         tp->link_config.orig_speed = SPEED_INVALID;
13336         tp->link_config.orig_duplex = DUPLEX_INVALID;
13337         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13338 }
13339
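/* Identify the PHY.  Preference order: phylib if enabled, then the MII
 * PHYSID registers (unless ASF/APE firmware owns the PHY), then the id
 * recorded from NVRAM, and finally the subsystem-ID table above.
 */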
13340 static int __devinit tg3_phy_probe(struct tg3 *tp)
13341 {
13342         u32 hw_phy_id_1, hw_phy_id_2;
13343         u32 hw_phy_id, hw_phy_id_masked;
13344         int err;
13345
13346         /* flow control autonegotiation is default behavior */
13347         tg3_flag_set(tp, PAUSE_AUTONEG);
13348         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13349
13350         if (tg3_flag(tp, USE_PHYLIB))
13351                 return tg3_phy_init(tp);
13352
13353         /* Reading the PHY ID register can conflict with ASF
13354          * firmware access to the PHY hardware.
13355          */
13356         err = 0;
13357         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13358                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13359         } else {
13360                 /* Now read the physical PHY_ID from the chip and verify
13361                  * that it is sane.  If it doesn't look good, we fall back
13362                  * to the PHY_ID found in the eeprom area, and failing
13363                  * that, the hard-coded subsystem-ID table.
13364                  */
13365                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13366                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13367
13368                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13369                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13370                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13371
13372                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13373         }
13374
13375         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13376                 tp->phy_id = hw_phy_id;
13377                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13378                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13379                 else
13380                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13381         } else {
13382                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13383                         /* Do nothing, phy ID already set up in
13384                          * tg3_get_eeprom_hw_cfg().
13385                          */
13386                 } else {
13387                         struct subsys_tbl_ent *p;
13388
13389                         /* No eeprom signature?  Try the hardcoded
13390                          * subsys device table.
13391                          */
13392                         p = tg3_lookup_by_subsys(tp);
13393                         if (!p)
13394                                 return -ENODEV;
13395
13396                         tp->phy_id = p->phy_id;
13397                         if (!tp->phy_id ||
13398                             tp->phy_id == TG3_PHY_ID_BCM8002)
13399                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13400                 }
13401         }
13402
13403         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13404             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13405              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13406              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13407               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13408              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13409               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13410                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13411
13412         tg3_phy_init_link_config(tp);
13413
13414         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13415             !tg3_flag(tp, ENABLE_APE) &&
13416             !tg3_flag(tp, ENABLE_ASF)) {
13417                 u32 bmsr, mask;
13418
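                /* BMSR latches link-down events; read it twice to get
                 * the current link state.
                 */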
13419                 tg3_readphy(tp, MII_BMSR, &bmsr);
13420                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13421                     (bmsr & BMSR_LSTATUS))
13422                         goto skip_phy_reset;
13423
13424                 err = tg3_phy_reset(tp);
13425                 if (err)
13426                         return err;
13427
13428                 tg3_phy_set_wirespeed(tp);
13429
13430                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13431                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13432                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13433                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13434                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13435                                             tp->link_config.flowctrl);
13436
13437                         tg3_writephy(tp, MII_BMCR,
13438                                      BMCR_ANENABLE | BMCR_ANRESTART);
13439                 }
13440         }
13441
13442 skip_phy_reset:
13443         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13444                 err = tg3_init_5401phy_dsp(tp);
13445                 if (err)
13446                         return err;
13447
13448                 err = tg3_init_5401phy_dsp(tp);
13449         }
13450
13451         return err;
13452 }
13453
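/* Parse the PCI VPD read-only section: keyword V0 supplies a firmware
 * version string (only when the manufacturer id reads "1028", i.e.
 * Dell) and keyword PN the board part number, with per-chip defaults
 * when no VPD is present.
 */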
13454 static void __devinit tg3_read_vpd(struct tg3 *tp)
13455 {
13456         u8 *vpd_data;
13457         unsigned int block_end, rosize, len;
13458         u32 vpdlen;
13459         int j, i = 0;
13460
13461         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13462         if (!vpd_data)
13463                 goto out_no_vpd;
13464
13465         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13466         if (i < 0)
13467                 goto out_not_found;
13468
13469         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13470         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13471         i += PCI_VPD_LRDT_TAG_SIZE;
13472
13473         if (block_end > vpdlen)
13474                 goto out_not_found;
13475
13476         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13477                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13478         if (j > 0) {
13479                 len = pci_vpd_info_field_size(&vpd_data[j]);
13480
13481                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13482                 if (j + len > block_end || len != 4 ||
13483                     memcmp(&vpd_data[j], "1028", 4))
13484                         goto partno;
13485
13486                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13487                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13488                 if (j < 0)
13489                         goto partno;
13490
13491                 len = pci_vpd_info_field_size(&vpd_data[j]);
13492
13493                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13494                 if (j + len > block_end)
13495                         goto partno;
13496
13497                 if (len >= sizeof(tp->fw_ver))
13498                         len = sizeof(tp->fw_ver) - 1;
13499                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
13500                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
13501                          &vpd_data[j]);
13502         }
13503
13504 partno:
13505         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13506                                       PCI_VPD_RO_KEYWORD_PARTNO);
13507         if (i < 0)
13508                 goto out_not_found;
13509
13510         len = pci_vpd_info_field_size(&vpd_data[i]);
13511
13512         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13513         if (len > TG3_BPN_SIZE ||
13514             (len + i) > vpdlen)
13515                 goto out_not_found;
13516
13517         memcpy(tp->board_part_number, &vpd_data[i], len);
13518
13519 out_not_found:
13520         kfree(vpd_data);
13521         if (tp->board_part_number[0])
13522                 return;
13523
13524 out_no_vpd:
13525         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13526                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13527                         strcpy(tp->board_part_number, "BCM5717");
13528                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13529                         strcpy(tp->board_part_number, "BCM5718");
13530                 else
13531                         goto nomatch;
13532         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13533                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13534                         strcpy(tp->board_part_number, "BCM57780");
13535                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13536                         strcpy(tp->board_part_number, "BCM57760");
13537                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13538                         strcpy(tp->board_part_number, "BCM57790");
13539                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13540                         strcpy(tp->board_part_number, "BCM57788");
13541                 else
13542                         goto nomatch;
13543         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13544                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13545                         strcpy(tp->board_part_number, "BCM57761");
13546                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13547                         strcpy(tp->board_part_number, "BCM57765");
13548                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13549                         strcpy(tp->board_part_number, "BCM57781");
13550                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13551                         strcpy(tp->board_part_number, "BCM57785");
13552                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13553                         strcpy(tp->board_part_number, "BCM57791");
13554                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13555                         strcpy(tp->board_part_number, "BCM57795");
13556                 else
13557                         goto nomatch;
13558         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13559                 strcpy(tp->board_part_number, "BCM95906");
13560         } else {
13561 nomatch:
13562                 strcpy(tp->board_part_number, "none");
13563         }
13564 }
13565
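/* A firmware image is considered valid when its first word masked with
 * 0xfc000000 equals 0x0c000000 and its second word is zero.
 */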
13566 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13567 {
13568         u32 val;
13569
13570         if (tg3_nvram_read(tp, offset, &val) ||
13571             (val & 0xfc000000) != 0x0c000000 ||
13572             tg3_nvram_read(tp, offset + 4, &val) ||
13573             val != 0)
13574                 return 0;
13575
13576         return 1;
13577 }
13578
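/* Extract the bootcode version: newer images embed a 16-byte version
 * string, while older ones only carry major/minor numbers in the NVM
 * pointer table.
 */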
13579 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13580 {
13581         u32 val, offset, start, ver_offset;
13582         int i, dst_off;
13583         bool newver = false;
13584
13585         if (tg3_nvram_read(tp, 0xc, &offset) ||
13586             tg3_nvram_read(tp, 0x4, &start))
13587                 return;
13588
13589         offset = tg3_nvram_logical_addr(tp, offset);
13590
13591         if (tg3_nvram_read(tp, offset, &val))
13592                 return;
13593
13594         if ((val & 0xfc000000) == 0x0c000000) {
13595                 if (tg3_nvram_read(tp, offset + 4, &val))
13596                         return;
13597
13598                 if (val == 0)
13599                         newver = true;
13600         }
13601
13602         dst_off = strlen(tp->fw_ver);
13603
13604         if (newver) {
13605                 if (TG3_VER_SIZE - dst_off < 16 ||
13606                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13607                         return;
13608
13609                 offset = offset + ver_offset - start;
13610                 for (i = 0; i < 16; i += 4) {
13611                         __be32 v;
13612                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13613                                 return;
13614
13615                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13616                 }
13617         } else {
13618                 u32 major, minor;
13619
13620                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13621                         return;
13622
13623                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13624                         TG3_NVM_BCVER_MAJSFT;
13625                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13626                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13627                          "v%d.%02d", major, minor);
13628         }
13629 }
13630
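/* Hardware self-boot images keep major/minor in the HWSB_CFG1 word. */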
13631 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13632 {
13633         u32 val, major, minor;
13634
13635         /* Use native endian representation */
13636         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13637                 return;
13638
13639         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13640                 TG3_NVM_HWSB_CFG1_MAJSFT;
13641         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13642                 TG3_NVM_HWSB_CFG1_MINSFT;
13643
13644         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13645 }
13646
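/* Self-boot (no bootcode) images encode major/minor/build in a
 * format-revision-dependent header word.
 */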
13647 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13648 {
13649         u32 offset, major, minor, build;
13650
13651         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13652
13653         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13654                 return;
13655
13656         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13657         case TG3_EEPROM_SB_REVISION_0:
13658                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13659                 break;
13660         case TG3_EEPROM_SB_REVISION_2:
13661                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13662                 break;
13663         case TG3_EEPROM_SB_REVISION_3:
13664                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13665                 break;
13666         case TG3_EEPROM_SB_REVISION_4:
13667                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13668                 break;
13669         case TG3_EEPROM_SB_REVISION_5:
13670                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13671                 break;
13672         case TG3_EEPROM_SB_REVISION_6:
13673                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13674                 break;
13675         default:
13676                 return;
13677         }
13678
13679         if (tg3_nvram_read(tp, offset, &val))
13680                 return;
13681
13682         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13683                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13684         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13685                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13686         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13687
13688         if (minor > 99 || build > 26)
13689                 return;
13690
13691         offset = strlen(tp->fw_ver);
13692         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13693                  " v%d.%02d", major, minor);
13694
13695         if (build > 0) {
13696                 offset = strlen(tp->fw_ver);
13697                 if (offset < TG3_VER_SIZE - 1)
13698                         tp->fw_ver[offset] = 'a' + build - 1;
13699         }
13700 }
13701
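/* Walk the NVM directory for the ASF init-code entry and append its
 * 16-byte version string to tp->fw_ver.
 */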
13702 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13703 {
13704         u32 val, offset, start;
13705         int i, vlen;
13706
13707         for (offset = TG3_NVM_DIR_START;
13708              offset < TG3_NVM_DIR_END;
13709              offset += TG3_NVM_DIRENT_SIZE) {
13710                 if (tg3_nvram_read(tp, offset, &val))
13711                         return;
13712
13713                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13714                         break;
13715         }
13716
13717         if (offset == TG3_NVM_DIR_END)
13718                 return;
13719
13720         if (!tg3_flag(tp, 5705_PLUS))
13721                 start = 0x08000000;
13722         else if (tg3_nvram_read(tp, offset - 4, &start))
13723                 return;
13724
13725         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13726             !tg3_fw_img_is_valid(tp, offset) ||
13727             tg3_nvram_read(tp, offset + 8, &val))
13728                 return;
13729
13730         offset += val - start;
13731
13732         vlen = strlen(tp->fw_ver);
13733
13734         tp->fw_ver[vlen++] = ',';
13735         tp->fw_ver[vlen++] = ' ';
13736
13737         for (i = 0; i < 4; i++) {
13738                 __be32 v;
13739                 if (tg3_nvram_read_be32(tp, offset, &v))
13740                         return;
13741
13742                 offset += sizeof(v);
13743
13744                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13745                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13746                         break;
13747                 }
13748
13749                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13750                 vlen += sizeof(v);
13751         }
13752 }
13753
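/* Query APE shared memory for the management firmware version once the
 * APE signals ready, noting whether the firmware speaks NCSI or DASH.
 */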
13754 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13755 {
13756         int vlen;
13757         u32 apedata;
13758         char *fwtype;
13759
13760         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13761                 return;
13762
13763         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13764         if (apedata != APE_SEG_SIG_MAGIC)
13765                 return;
13766
13767         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13768         if (!(apedata & APE_FW_STATUS_READY))
13769                 return;
13770
13771         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13772
13773         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13774                 tg3_flag_set(tp, APE_HAS_NCSI);
13775                 fwtype = "NCSI";
13776         } else {
13777                 fwtype = "DASH";
13778         }
13779
13780         vlen = strlen(tp->fw_ver);
13781
13782         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13783                  fwtype,
13784                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13785                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13786                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13787                  (apedata & APE_FW_VERSION_BLDMSK));
13788 }
13789
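/* Build tp->fw_ver: pick the bootcode, self-boot, or hardware
 * self-boot reader based on the NVRAM magic, then append management
 * firmware details.
 */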
13790 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13791 {
13792         u32 val;
13793         bool vpd_vers = false;
13794
13795         if (tp->fw_ver[0] != 0)
13796                 vpd_vers = true;
13797
13798         if (tg3_flag(tp, NO_NVRAM)) {
13799                 strcat(tp->fw_ver, "sb");
13800                 return;
13801         }
13802
13803         if (tg3_nvram_read(tp, 0, &val))
13804                 return;
13805
13806         if (val == TG3_EEPROM_MAGIC)
13807                 tg3_read_bc_ver(tp);
13808         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13809                 tg3_read_sb_ver(tp, val);
13810         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13811                 tg3_read_hwsb_ver(tp);
13812         else
13813                 return;
13814
13815         if (vpd_vers)
13816                 goto done;
13817
13818         if (tg3_flag(tp, ENABLE_APE)) {
13819                 if (tg3_flag(tp, ENABLE_ASF))
13820                         tg3_read_dash_ver(tp);
13821         } else if (tg3_flag(tp, ENABLE_ASF)) {
13822                 tg3_read_mgmtfw_ver(tp);
13823         }
13824
13825 done:
13826         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13827 }
13828
13829 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13830
13831 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13832 {
13833         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13834                 return TG3_RX_RET_MAX_SIZE_5717;
13835         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13836                 return TG3_RX_RET_MAX_SIZE_5700;
13837         else
13838                 return TG3_RX_RET_MAX_SIZE_5705;
13839 }
13840
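/* Host bridges known to reorder posted writes to mailbox registers.
 * The table is matched with pci_dev_present() in tg3_get_invariants()
 * before setting MBOX_WRITE_REORDER.
 */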
13841 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13842         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13843         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13844         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13845         { },
13846 };
13847
13848 static int __devinit tg3_get_invariants(struct tg3 *tp)
13849 {
13850         u32 misc_ctrl_reg;
13851         u32 pci_state_reg, grc_misc_cfg;
13852         u32 val;
13853         u16 pci_cmd;
13854         int err;
13855
13856         /* Force memory write invalidate off.  If we leave it on,
13857          * then on 5700_BX chips we have to enable a workaround.
13858          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13859          * to match the cacheline size.  The Broadcom driver has this
13860          * workaround but turns MWI off all the time and so never uses
13861          * it.  This seems to suggest that the workaround is insufficient.
13862          */
13863         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13864         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13865         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13866
13867         /* Important! -- Make sure register accesses are byteswapped
13868          * correctly.  Also, for those chips that require it, make
13869          * sure that indirect register accesses are enabled before
13870          * the first operation.
13871          */
13872         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13873                               &misc_ctrl_reg);
13874         tp->misc_host_ctrl |= (misc_ctrl_reg &
13875                                MISC_HOST_CTRL_CHIPREV);
13876         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13877                                tp->misc_host_ctrl);
13878
13879         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13880                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13881         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13882                 u32 prod_id_asic_rev;
13883
13884                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13885                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13886                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13887                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13888                         pci_read_config_dword(tp->pdev,
13889                                               TG3PCI_GEN2_PRODID_ASICREV,
13890                                               &prod_id_asic_rev);
13891                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13892                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13893                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13894                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13895                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13896                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13897                         pci_read_config_dword(tp->pdev,
13898                                               TG3PCI_GEN15_PRODID_ASICREV,
13899                                               &prod_id_asic_rev);
13900                 else
13901                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13902                                               &prod_id_asic_rev);
13903
13904                 tp->pci_chip_rev_id = prod_id_asic_rev;
13905         }
13906
13907         /* Wrong chip ID in 5752 A0. This code can be removed later
13908          * as A0 is not in production.
13909          */
13910         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13911                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13912
13913         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13914          * we need to disable memory accesses and use configuration
13915          * cycles only to access all registers. The 5702/03 chips
13916          * can mistakenly decode the special cycles from the
13917          * ICH chipsets as memory write cycles, causing corruption
13918          * of register and memory space. Only certain ICH bridges
13919          * will drive special cycles with non-zero data during the
13920          * address phase which can fall within the 5703's address
13921          * range. This is not an ICH bug as the PCI spec allows
13922          * non-zero address during special cycles. However, only
13923          * these ICH bridges are known to drive non-zero addresses
13924          * during special cycles.
13925          *
13926          * Since special cycles do not cross PCI bridges, we only
13927          * enable this workaround if the 5703 is on the secondary
13928          * bus of these ICH bridges.
13929          */
13930         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13931             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13932                 static struct tg3_dev_id {
13933                         u32     vendor;
13934                         u32     device;
13935                         u32     rev;
13936                 } ich_chipsets[] = {
13937                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13938                           PCI_ANY_ID },
13939                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13940                           PCI_ANY_ID },
13941                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13942                           0xa },
13943                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13944                           PCI_ANY_ID },
13945                         { },
13946                 };
13947                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13948                 struct pci_dev *bridge = NULL;
13949
13950                 while (pci_id->vendor != 0) {
13951                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13952                                                 bridge);
13953                         if (!bridge) {
13954                                 pci_id++;
13955                                 continue;
13956                         }
13957                         if (pci_id->rev != PCI_ANY_ID) {
13958                                 if (bridge->revision > pci_id->rev)
13959                                         continue;
13960                         }
13961                         if (bridge->subordinate &&
13962                             (bridge->subordinate->number ==
13963                              tp->pdev->bus->number)) {
13964                                 tg3_flag_set(tp, ICH_WORKAROUND);
13965                                 pci_dev_put(bridge);
13966                                 break;
13967                         }
13968                 }
13969         }
13970
13971         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13972                 static struct tg3_dev_id {
13973                         u32     vendor;
13974                         u32     device;
13975                 } bridge_chipsets[] = {
13976                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13977                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13978                         { },
13979                 };
13980                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13981                 struct pci_dev *bridge = NULL;
13982
13983                 while (pci_id->vendor != 0) {
13984                         bridge = pci_get_device(pci_id->vendor,
13985                                                 pci_id->device,
13986                                                 bridge);
13987                         if (!bridge) {
13988                                 pci_id++;
13989                                 continue;
13990                         }
13991                         if (bridge->subordinate &&
13992                             (bridge->subordinate->number <=
13993                              tp->pdev->bus->number) &&
13994                             (bridge->subordinate->subordinate >=
13995                              tp->pdev->bus->number)) {
13996                                 tg3_flag_set(tp, 5701_DMA_BUG);
13997                                 pci_dev_put(bridge);
13998                                 break;
13999                         }
14000                 }
14001         }
14002
14003         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14004          * DMA addresses > 40-bit.  The bridge may have other 57xx
14005          * devices behind it in some four-port NIC designs, for example.
14006          * Any tg3 device found behind the bridge will also need the 40-bit
14007          * DMA workaround.
14008          */
14009         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14010             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14011                 tg3_flag_set(tp, 5780_CLASS);
14012                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14013                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14014         } else {
14015                 struct pci_dev *bridge = NULL;
14016
14017                 do {
14018                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14019                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14020                                                 bridge);
14021                         if (bridge && bridge->subordinate &&
14022                             (bridge->subordinate->number <=
14023                              tp->pdev->bus->number) &&
14024                             (bridge->subordinate->subordinate >=
14025                              tp->pdev->bus->number)) {
14026                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14027                                 pci_dev_put(bridge);
14028                                 break;
14029                         }
14030                 } while (bridge);
14031         }
14032
14033         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14034             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14035                 tp->pdev_peer = tg3_find_peer(tp);
14036
14037         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14038             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14039             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14040                 tg3_flag_set(tp, 5717_PLUS);
14041
14042         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14043             tg3_flag(tp, 5717_PLUS))
14044                 tg3_flag_set(tp, 57765_PLUS);
14045
14046         /* Intentionally exclude ASIC_REV_5906 */
14047         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14048             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14049             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14050             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14051             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14052             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14053             tg3_flag(tp, 57765_PLUS))
14054                 tg3_flag_set(tp, 5755_PLUS);
14055
14056         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14057             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14058             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14059             tg3_flag(tp, 5755_PLUS) ||
14060             tg3_flag(tp, 5780_CLASS))
14061                 tg3_flag_set(tp, 5750_PLUS);
14062
14063         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14064             tg3_flag(tp, 5750_PLUS))
14065                 tg3_flag_set(tp, 5705_PLUS);
14066
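        /* The generation flags set above form a strict chain:
         * 5717_PLUS => 57765_PLUS => 5755_PLUS => 5750_PLUS => 5705_PLUS,
         * so later code only needs to test the broadest flag that applies.
         */
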
14067         /* Determine TSO capabilities */
14068         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14069                 ; /* Do nothing. HW bug. */
14070         else if (tg3_flag(tp, 57765_PLUS))
14071                 tg3_flag_set(tp, HW_TSO_3);
14072         else if (tg3_flag(tp, 5755_PLUS) ||
14073                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14074                 tg3_flag_set(tp, HW_TSO_2);
14075         else if (tg3_flag(tp, 5750_PLUS)) {
14076                 tg3_flag_set(tp, HW_TSO_1);
14077                 tg3_flag_set(tp, TSO_BUG);
14078                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14079                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14080                         tg3_flag_clear(tp, TSO_BUG);
14081         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14082                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14083                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14084                 tg3_flag_set(tp, TSO_BUG);
14085                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14086                         tp->fw_needed = FIRMWARE_TG3TSO5;
14087                 else
14088                         tp->fw_needed = FIRMWARE_TG3TSO;
14089         }
14090
14091         /* Selectively allow TSO based on operating conditions */
14092         if (tg3_flag(tp, HW_TSO_1) ||
14093             tg3_flag(tp, HW_TSO_2) ||
14094             tg3_flag(tp, HW_TSO_3) ||
14095             tp->fw_needed) {
14096                 /* For firmware TSO, assume ASF is disabled.
14097                  * We'll disable TSO later if we discover ASF
14098                  * is enabled in tg3_get_eeprom_hw_cfg().
14099                  */
14100                 tg3_flag_set(tp, TSO_CAPABLE);
14101         } else {
14102                 tg3_flag_clear(tp, TSO_CAPABLE);
14103                 tg3_flag_clear(tp, TSO_BUG);
14104                 tp->fw_needed = NULL;
14105         }
14106
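        /* 5701 A0 needs the firmware-based fix regardless of the TSO
         * decision above, so request FIRMWARE_TG3 unconditionally.
         */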
14107         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14108                 tp->fw_needed = FIRMWARE_TG3;
14109
14110         tp->irq_max = 1;
14111
14112         if (tg3_flag(tp, 5750_PLUS)) {
14113                 tg3_flag_set(tp, SUPPORT_MSI);
14114                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14115                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14116                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14117                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14118                      tp->pdev_peer == tp->pdev))
14119                         tg3_flag_clear(tp, SUPPORT_MSI);
14120
14121                 if (tg3_flag(tp, 5755_PLUS) ||
14122                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14123                         tg3_flag_set(tp, 1SHOT_MSI);
14124                 }
14125
14126                 if (tg3_flag(tp, 57765_PLUS)) {
14127                         tg3_flag_set(tp, SUPPORT_MSIX);
14128                         tp->irq_max = TG3_IRQ_MAX_VECS;
14129                 }
14130         }
14131
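        /* Net effect: irq_max stays 1 unless this is a 57765_PLUS part,
         * where MSI-X allows up to TG3_IRQ_MAX_VECS vectors.
         */
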
14132         if (tg3_flag(tp, 5755_PLUS) ||
14133             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14134                 tg3_flag_set(tp, SHORT_DMA_BUG);
14135
14136         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14137                 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14138
14139         if (tg3_flag(tp, 5717_PLUS))
14140                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14141
14142         if (tg3_flag(tp, 57765_PLUS) &&
14143             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14144                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14145
14146         if (!tg3_flag(tp, 5705_PLUS) ||
14147             tg3_flag(tp, 5780_CLASS) ||
14148             tg3_flag(tp, USE_JUMBO_BDFLAG))
14149                 tg3_flag_set(tp, JUMBO_CAPABLE);
14150
14151         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14152                               &pci_state_reg);
14153
14154         if (pci_is_pcie(tp->pdev)) {
14155                 u16 lnkctl;
14156
14157                 tg3_flag_set(tp, PCI_EXPRESS);
14158
14159                 tp->pcie_readrq = 4096;
14160                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14161                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14162                         tp->pcie_readrq = 2048;
14163
14164                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14165
14166                 pci_read_config_word(tp->pdev,
14167                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14168                                      &lnkctl);
14169                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14170                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14171                             ASIC_REV_5906) {
14172                                 tg3_flag_clear(tp, HW_TSO_2);
14173                                 tg3_flag_clear(tp, TSO_CAPABLE);
14174                         }
14175                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14176                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14177                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14178                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14179                                 tg3_flag_set(tp, CLKREQ_BUG);
14180                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14181                         tg3_flag_set(tp, L1PLLPD_EN);
14182                 }
14183         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14184                 /* BCM5785 devices are effectively PCIe devices, and should
14185                  * follow PCIe codepaths, but do not have a PCIe capabilities
14186                  * section.
14187                  */
14188                 tg3_flag_set(tp, PCI_EXPRESS);
14189         } else if (!tg3_flag(tp, 5705_PLUS) ||
14190                    tg3_flag(tp, 5780_CLASS)) {
14191                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14192                 if (!tp->pcix_cap) {
14193                         dev_err(&tp->pdev->dev,
14194                                 "Cannot find PCI-X capability, aborting\n");
14195                         return -EIO;
14196                 }
14197
14198                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14199                         tg3_flag_set(tp, PCIX_MODE);
14200         }
14201
14202         /* If we have an AMD 762 or VIA K8T800 chipset, reordering
14203          * of mailbox register writes by the host controller can
14204          * cause major trouble.  We read back from every mailbox
14205          * register write to force the writes to be posted to the
14206          * chip in order.
14207          */
14208         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14209             !tg3_flag(tp, PCI_EXPRESS))
14210                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14211
14212         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14213                              &tp->pci_cacheline_sz);
14214         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14215                              &tp->pci_lat_timer);
14216         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14217             tp->pci_lat_timer < 64) {
14218                 tp->pci_lat_timer = 64;
14219                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14220                                       tp->pci_lat_timer);
14221         }
14222
14223         /* Important! -- It is critical that the PCI-X hw workaround
14224          * situation is decided before the first MMIO register access.
14225          */
14226         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14227                 /* 5700 BX chips need to have their TX producer index
14228                  * mailboxes written twice to work around a bug.
14229                  */
14230                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14231
14232                 /* If we are in PCI-X mode, enable register write workaround.
14233                  *
14234                  * The workaround is to use indirect register accesses
14235                  * for all chip writes not to mailbox registers.
14236                  */
14237                 if (tg3_flag(tp, PCIX_MODE)) {
14238                         u32 pm_reg;
14239
14240                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14241
14242                         /* The chip can have its power management PCI config
14243                          * space registers clobbered due to this bug.
14244                          * So explicitly force the chip into D0 here.
14245                          */
14246                         pci_read_config_dword(tp->pdev,
14247                                               tp->pm_cap + PCI_PM_CTRL,
14248                                               &pm_reg);
14249                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14250                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14251                         pci_write_config_dword(tp->pdev,
14252                                                tp->pm_cap + PCI_PM_CTRL,
14253                                                pm_reg);
14254
14255                         /* Also, force SERR#/PERR# in PCI command. */
14256                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14257                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14258                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14259                 }
14260         }
14261
14262         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14263                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14264         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14265                 tg3_flag_set(tp, PCI_32BIT);
14266
14267         /* Chip-specific fixup from Broadcom driver */
14268         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14269             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14270                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14271                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14272         }
14273
14274         /* Default fast path register access methods */
14275         tp->read32 = tg3_read32;
14276         tp->write32 = tg3_write32;
14277         tp->read32_mbox = tg3_read32;
14278         tp->write32_mbox = tg3_write32;
14279         tp->write32_tx_mbox = tg3_write32;
14280         tp->write32_rx_mbox = tg3_write32;
14281
14282         /* Various workaround register access methods */
14283         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14284                 tp->write32 = tg3_write_indirect_reg32;
14285         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14286                  (tg3_flag(tp, PCI_EXPRESS) &&
14287                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14288                 /*
14289                  * Back to back register writes can cause problems on these
14290                  * chips, the workaround is to read back all reg writes
14291                  * except those to mailbox regs.
14292                  *
14293                  * See tg3_write_indirect_reg32().
14294                  */
14295                 tp->write32 = tg3_write_flush_reg32;
14296         }
14297
14298         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14299                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14300                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14301                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14302         }
14303
14304         if (tg3_flag(tp, ICH_WORKAROUND)) {
14305                 tp->read32 = tg3_read_indirect_reg32;
14306                 tp->write32 = tg3_write_indirect_reg32;
14307                 tp->read32_mbox = tg3_read_indirect_mbox;
14308                 tp->write32_mbox = tg3_write_indirect_mbox;
14309                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14310                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14311
14312                 iounmap(tp->regs);
14313                 tp->regs = NULL;
14314
14315                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14316                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14317                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14318         }
14319         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14320                 tp->read32_mbox = tg3_read32_mbox_5906;
14321                 tp->write32_mbox = tg3_write32_mbox_5906;
14322                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14323                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14324         }
14325
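        /* Summary of the access-method overrides chosen above: indirect
         * register accesses for the PCI-X target bug and the ICH
         * workaround, flushed (read-back) writes for 5701 and 5750 A0 on
         * PCIe, doubled TX mailbox writes for the 5700 BX bug, mailbox
         * read-back on reordering bridges, and the special mailbox
         * accessors on 5906.
         */
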
14326         if (tp->write32 == tg3_write_indirect_reg32 ||
14327             (tg3_flag(tp, PCIX_MODE) &&
14328              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14329               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14330                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14331
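        /* SRAM_USE_CONFIG makes tg3_read_mem()/tg3_write_mem() go through
         * the PCI config-space memory window instead of direct MMIO,
         * since plain register writes are unreliable in these modes.
         */
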
14332         /* The memory arbiter has to be enabled in order for SRAM accesses
14333          * to succeed.  Normally on powerup the tg3 chip firmware will make
14334          * sure it is enabled, but other entities such as system netboot
14335          * code might disable it.
14336          */
14337         val = tr32(MEMARB_MODE);
14338         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14339
14340         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14341         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14342             tg3_flag(tp, 5780_CLASS)) {
14343                 if (tg3_flag(tp, PCIX_MODE)) {
14344                         pci_read_config_dword(tp->pdev,
14345                                               tp->pcix_cap + PCI_X_STATUS,
14346                                               &val);
14347                         tp->pci_fn = val & 0x7;
14348                 }
14349         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14350                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14351                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14352                     NIC_SRAM_CPMUSTAT_SIG) {
14353                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14354                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14355                 }
14356         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14357                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14358                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14359                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14360                     NIC_SRAM_CPMUSTAT_SIG) {
14361                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14362                                      TG3_CPMU_STATUS_FSHFT_5719;
14363                 }
14364         }
14365
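        /* Summary: tp->pci_fn comes from the PCI devfn by default, from
         * the PCI-X status register on 5704/5780-class parts in PCI-X
         * mode, and from the CPMU status word in SRAM on 5717/5719/5720.
         */
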
14366         /* Get eeprom hw config before calling tg3_set_power_state().
14367          * In particular, the TG3_FLAG_IS_NIC flag must be
14368          * determined before calling tg3_set_power_state() so that
14369          * we know whether or not to switch out of Vaux power.
14370          * When the flag is set, it means that GPIO1 is used for eeprom
14371          * write protect and also implies that it is a LOM where GPIOs
14372          * are not used to switch power.
14373          */
14374         tg3_get_eeprom_hw_cfg(tp);
14375
14376         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14377                 tg3_flag_clear(tp, TSO_CAPABLE);
14378                 tg3_flag_clear(tp, TSO_BUG);
14379                 tp->fw_needed = NULL;
14380         }
14381
14382         if (tg3_flag(tp, ENABLE_APE)) {
14383                 /* Allow reads and writes to the
14384                  * APE register and memory space.
14385                  */
14386                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14387                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14388                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14389                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14390                                        pci_state_reg);
14391
14392                 tg3_ape_lock_init(tp);
14393         }
14394
14395         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14396             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14397             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14398             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14399             tg3_flag(tp, 57765_PLUS))
14400                 tg3_flag_set(tp, CPMU_PRESENT);
14401
14402         /* Set up tp->grc_local_ctrl before calling
14403          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14404          * will bring 5700's external PHY out of reset.
14405          * It is also used as eeprom write protect on LOMs.
14406          */
14407         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14408         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14409             tg3_flag(tp, EEPROM_WRITE_PROT))
14410                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14411                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14412         /* Unused GPIO3 must be driven as output on 5752 because there
14413          * are no pull-up resistors on unused GPIO pins.
14414          */
14415         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14416                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14417
14418         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14419             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14420             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14421                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14422
14423         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14424             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14425                 /* Turn off the debug UART. */
14426                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14427                 if (tg3_flag(tp, IS_NIC))
14428                         /* Keep VMain power. */
14429                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14430                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14431         }
14432
14433         /* Switch out of Vaux if it is a NIC */
14434         tg3_pwrsrc_switch_to_vmain(tp);
14435
14436         /* Derive initial jumbo mode from MTU assigned in
14437          * ether_setup() via the alloc_etherdev() call
14438          */
14439         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14440                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14441
14442         /* Determine WakeOnLan speed to use. */
14443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14444             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14445             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14446             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14447                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14448         } else {
14449                 tg3_flag_set(tp, WOL_SPEED_100MB);
14450         }
14451
14452         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14453                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14454
14455         /* A few boards don't want the Ethernet@WireSpeed phy feature */
14456         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14457             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14458              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14459              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14460             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14461             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14462                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14463
14464         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14465             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14466                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14467         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14468                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14469
14470         if (tg3_flag(tp, 5705_PLUS) &&
14471             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14472             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14473             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14474             !tg3_flag(tp, 57765_PLUS)) {
14475                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14476                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14477                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14478                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14479                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14480                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14481                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14482                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14483                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14484                 } else
14485                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14486         }
14487
14488         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14489             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14490                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14491                 if (tp->phy_otp == 0)
14492                         tp->phy_otp = TG3_OTP_DEFAULT;
14493         }
14494
14495         if (tg3_flag(tp, CPMU_PRESENT))
14496                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14497         else
14498                 tp->mi_mode = MAC_MI_MODE_BASE;
14499
14500         tp->coalesce_mode = 0;
14501         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14502             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14503                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14504
14505         /* Set these bits to enable the statistics workaround. */
14506         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14507             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14508             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14509                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14510                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14511         }
14512
14513         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14514             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14515                 tg3_flag_set(tp, USE_PHYLIB);
14516
14517         err = tg3_mdio_init(tp);
14518         if (err)
14519                 return err;
14520
14521         /* Initialize data/descriptor byte/word swapping. */
14522         val = tr32(GRC_MODE);
14523         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14524                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14525                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14526                         GRC_MODE_B2HRX_ENABLE |
14527                         GRC_MODE_HTX2B_ENABLE |
14528                         GRC_MODE_HOST_STACKUP);
14529         else
14530                 val &= GRC_MODE_HOST_STACKUP;
14531
14532         tw32(GRC_MODE, val | tp->grc_mode);
14533
14534         tg3_switch_clocks(tp);
14535
14536         /* Clear this out for sanity. */
14537         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14538
14539         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
14540         tw32(TG3PCI_REG_BASE_ADDR, 0);
14541
14542         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14543                               &pci_state_reg);
14544         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14545             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14546                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14547
14548                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14549                     chiprevid == CHIPREV_ID_5701_B0 ||
14550                     chiprevid == CHIPREV_ID_5701_B2 ||
14551                     chiprevid == CHIPREV_ID_5701_B5) {
14552                         void __iomem *sram_base;
14553
14554                         /* Write some dummy words into the SRAM status block
14555                          * area and see if they read back correctly.  If the
14556                          * readback is bad, force-enable the PCI-X workaround.
14557                          */
14558                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14559
14560                         writel(0x00000000, sram_base);
14561                         writel(0x00000000, sram_base + 4);
14562                         writel(0xffffffff, sram_base + 4);
14563                         if (readl(sram_base) != 0x00000000)
14564                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14565                 }
14566         }
14567
14568         udelay(50);
14569         tg3_nvram_init(tp);
14570
14571         grc_misc_cfg = tr32(GRC_MISC_CFG);
14572         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14573
14574         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14575             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14576              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14577                 tg3_flag_set(tp, IS_5788);
14578
14579         if (!tg3_flag(tp, IS_5788) &&
14580             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14581                 tg3_flag_set(tp, TAGGED_STATUS);
14582         if (tg3_flag(tp, TAGGED_STATUS)) {
14583                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14584                                       HOSTCC_MODE_CLRTICK_TXBD);
14585
14586                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14587                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14588                                        tp->misc_host_ctrl);
14589         }
14590
14591         /* Preserve the APE MAC_MODE bits */
14592         if (tg3_flag(tp, ENABLE_APE))
14593                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14594         else
14595                 tp->mac_mode = 0;
14596
14597         /* These devices are limited to 10/100 speeds only. */
14598         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14599              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14600             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14601              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14602              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14603               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14604               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14605             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14606              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14607               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14608               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14609             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14610             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14611             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14612             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14613                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14614
14615         err = tg3_phy_probe(tp);
14616         if (err) {
14617                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14618                 /* ... but do not return immediately ... */
14619                 tg3_mdio_fini(tp);
14620         }
14621
14622         tg3_read_vpd(tp);
14623         tg3_read_fw_ver(tp);
14624
14625         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14626                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14627         } else {
14628                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14629                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14630                 else
14631                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14632         }
14633
14634         /* 5700 {AX,BX} chips have a broken status block link
14635          * change bit implementation, so we must use the
14636          * status register in those cases.
14637          */
14638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14639                 tg3_flag_set(tp, USE_LINKCHG_REG);
14640         else
14641                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14642
14643         /* The led_ctrl is set during tg3_phy_probe; here we might
14644          * have to force the link status polling mechanism based
14645          * upon subsystem IDs.
14646          */
14647         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14648             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14649             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14650                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14651                 tg3_flag_set(tp, USE_LINKCHG_REG);
14652         }
14653
14654         /* For all SERDES we poll the MAC status register. */
14655         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14656                 tg3_flag_set(tp, POLL_SERDES);
14657         else
14658                 tg3_flag_clear(tp, POLL_SERDES);
14659
14660         tp->rx_offset = NET_IP_ALIGN;
14661         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14662         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14663             tg3_flag(tp, PCIX_MODE)) {
14664                 tp->rx_offset = 0;
14665 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14666                 tp->rx_copy_thresh = ~(u16)0;
14667 #endif
14668         }
14669
14670         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14671         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14672         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14673
14674         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14675
14676         /* Increment the rx prod index on the rx std ring by at most
14677          * 8 for these chips to work around hw errata.
14678          */
14679         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14682                 tp->rx_std_max_post = 8;
14683
14684         if (tg3_flag(tp, ASPM_WORKAROUND))
14685                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14686                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14687
14688         return err;
14689 }
14690
14691 #ifdef CONFIG_SPARC
14692 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14693 {
14694         struct net_device *dev = tp->dev;
14695         struct pci_dev *pdev = tp->pdev;
14696         struct device_node *dp = pci_device_to_OF_node(pdev);
14697         const unsigned char *addr;
14698         int len;
14699
14700         addr = of_get_property(dp, "local-mac-address", &len);
14701         if (addr && len == 6) {
14702                 memcpy(dev->dev_addr, addr, 6);
14703                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14704                 return 0;
14705         }
14706         return -ENODEV;
14707 }
14708
14709 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14710 {
14711         struct net_device *dev = tp->dev;
14712
14713         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14714         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14715         return 0;
14716 }
14717 #endif
14718
14719 static int __devinit tg3_get_device_address(struct tg3 *tp)
14720 {
14721         struct net_device *dev = tp->dev;
14722         u32 hi, lo, mac_offset;
14723         int addr_ok = 0;
14724
14725 #ifdef CONFIG_SPARC
14726         if (!tg3_get_macaddr_sparc(tp))
14727                 return 0;
14728 #endif
14729
14730         mac_offset = 0x7c;
14731         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14732             tg3_flag(tp, 5780_CLASS)) {
14733                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14734                         mac_offset = 0xcc;
14735                 if (tg3_nvram_lock(tp))
14736                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14737                 else
14738                         tg3_nvram_unlock(tp);
14739         } else if (tg3_flag(tp, 5717_PLUS)) {
14740                 if (tp->pci_fn & 1)
14741                         mac_offset = 0xcc;
14742                 if (tp->pci_fn > 1)
14743                         mac_offset += 0x18c;
14744         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14745                 mac_offset = 0x10;
14746
14747         /* First try to get it from MAC address mailbox. */
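        /* The mailbox is valid when the top 16 bits of the high word are
         * the signature 0x484b ("HK" in ASCII).  Example: hi == 0x484b0010
         * and lo == 0x18428a2c decode to 00:10:18:42:8a:2c (a Broadcom
         * 00:10:18 OUI address).
         */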
14748         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14749         if ((hi >> 16) == 0x484b) {
14750                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14751                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14752
14753                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14754                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14755                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14756                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14757                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14758
14759                 /* Some old bootcode may report a 0 MAC address in SRAM */
14760                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14761         }
14762         if (!addr_ok) {
14763                 /* Next, try NVRAM. */
14764                 if (!tg3_flag(tp, NO_NVRAM) &&
14765                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14766                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14767                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14768                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14769                 }
14770                 /* Finally just fetch it out of the MAC control regs. */
14771                 else {
14772                         hi = tr32(MAC_ADDR_0_HIGH);
14773                         lo = tr32(MAC_ADDR_0_LOW);
14774
14775                         dev->dev_addr[5] = lo & 0xff;
14776                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14777                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14778                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14779                         dev->dev_addr[1] = hi & 0xff;
14780                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14781                 }
14782         }
14783
14784         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14785 #ifdef CONFIG_SPARC
14786                 if (!tg3_get_default_macaddr_sparc(tp))
14787                         return 0;
14788 #endif
14789                 return -EINVAL;
14790         }
14791         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14792         return 0;
14793 }
14794
14795 #define BOUNDARY_SINGLE_CACHELINE       1
14796 #define BOUNDARY_MULTI_CACHELINE        2
14797
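/* Compute the DMA read/write boundary bits for DMA_RWCTRL.  Note that
 * the PCI cacheline-size register counts in 32-bit words, so a raw
 * value of 0x10 means a 64-byte line, and a raw value of 0 is treated
 * as 1024 bytes.  The boundary chosen depends on the bus type and on
 * whether the architecture prefers single- or multi-cacheline bursts.
 */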
14798 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14799 {
14800         int cacheline_size;
14801         u8 byte;
14802         int goal;
14803
14804         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14805         if (byte == 0)
14806                 cacheline_size = 1024;
14807         else
14808                 cacheline_size = (int) byte * 4;
14809
14810         /* On 5703 and later chips, the boundary bits have no
14811          * effect.
14812          */
14813         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14814             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14815             !tg3_flag(tp, PCI_EXPRESS))
14816                 goto out;
14817
14818 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14819         goal = BOUNDARY_MULTI_CACHELINE;
14820 #else
14821 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14822         goal = BOUNDARY_SINGLE_CACHELINE;
14823 #else
14824         goal = 0;
14825 #endif
14826 #endif
14827
14828         if (tg3_flag(tp, 57765_PLUS)) {
14829                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14830                 goto out;
14831         }
14832
14833         if (!goal)
14834                 goto out;
14835
14836         /* PCI controllers on most RISC systems tend to disconnect
14837          * when a device tries to burst across a cache-line boundary.
14838          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14839          *
14840          * Unfortunately, for PCI-E there are only limited
14841          * write-side controls for this, and thus for reads
14842          * we will still get the disconnects.  We'll also waste
14843          * these PCI cycles for both read and write for chips
14844          * other than 5700 and 5701, which do not implement the
14845          * boundary bits.
14846          */
14847         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14848                 switch (cacheline_size) {
14849                 case 16:
14850                 case 32:
14851                 case 64:
14852                 case 128:
14853                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14854                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14855                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14856                         } else {
14857                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14858                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14859                         }
14860                         break;
14861
14862                 case 256:
14863                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14864                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14865                         break;
14866
14867                 default:
14868                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14869                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14870                         break;
14871                 }
14872         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14873                 switch (cacheline_size) {
14874                 case 16:
14875                 case 32:
14876                 case 64:
14877                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14878                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14879                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14880                                 break;
14881                         }
14882                         /* fallthrough */
14883                 case 128:
14884                 default:
14885                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14886                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14887                         break;
14888                 }
14889         } else {
14890                 switch (cacheline_size) {
14891                 case 16:
14892                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14893                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14894                                         DMA_RWCTRL_WRITE_BNDRY_16);
14895                                 break;
14896                         }
14897                         /* fallthrough */
14898                 case 32:
14899                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14900                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14901                                         DMA_RWCTRL_WRITE_BNDRY_32);
14902                                 break;
14903                         }
14904                         /* fallthrough */
14905                 case 64:
14906                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14907                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14908                                         DMA_RWCTRL_WRITE_BNDRY_64);
14909                                 break;
14910                         }
14911                         /* fallthrough */
14912                 case 128:
14913                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14914                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14915                                         DMA_RWCTRL_WRITE_BNDRY_128);
14916                                 break;
14917                         }
14918                         /* fallthrough */
14919                 case 256:
14920                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14921                                 DMA_RWCTRL_WRITE_BNDRY_256);
14922                         break;
14923                 case 512:
14924                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14925                                 DMA_RWCTRL_WRITE_BNDRY_512);
14926                         break;
14927                 case 1024:
14928                 default:
14929                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14930                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14931                         break;
14932                 }
14933         }
14934
14935 out:
14936         return val;
14937 }
14938
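/* Run a single DMA transaction through the chip as a self-test: build
 * one internal buffer descriptor in SRAM via the PCI memory window,
 * kick the read (to_device) or write DMA queue, and poll the matching
 * completion FIFO for up to 40 x 100us before giving up with -ENODEV.
 */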
14939 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14940 {
14941         struct tg3_internal_buffer_desc test_desc;
14942         u32 sram_dma_descs;
14943         int i, ret;
14944
14945         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14946
14947         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14948         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14949         tw32(RDMAC_STATUS, 0);
14950         tw32(WDMAC_STATUS, 0);
14951
14952         tw32(BUFMGR_MODE, 0);
14953         tw32(FTQ_RESET, 0);
14954
14955         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14956         test_desc.addr_lo = buf_dma & 0xffffffff;
14957         test_desc.nic_mbuf = 0x00002100;
14958         test_desc.len = size;
14959
14960         /*
14961          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14962          * the *second* time the tg3 driver was loaded after an
14963          * initial scan.
14964          *
14965          * Broadcom tells me:
14966          *   ...the DMA engine is connected to the GRC block and a DMA
14967          *   reset may affect the GRC block in some unpredictable way...
14968          *   The behavior of resets to individual blocks has not been tested.
14969          *
14970          * Broadcom noted the GRC reset will also reset all sub-components.
14971          */
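        /* Host-to-device transfers exercise the read DMA engine
         * (RDMAC); device-to-host transfers exercise the write DMA
         * engine (WDMAC).  The cqid_sqid field selects the matching
         * FTQ completion and send queues for each direction.
         */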
14972         if (to_device) {
14973                 test_desc.cqid_sqid = (13 << 8) | 2;
14974
14975                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14976                 udelay(40);
14977         } else {
14978                 test_desc.cqid_sqid = (16 << 8) | 7;
14979
14980                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14981                 udelay(40);
14982         }
14983         test_desc.flags = 0x00000005;
14984
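        /* Copy the descriptor into NIC SRAM one 32-bit word at a time
         * through the indirect memory window in PCI config space.
         */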
14985         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14986                 u32 val;
14987
14988                 val = *(((u32 *)&test_desc) + i);
14989                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14990                                        sram_dma_descs + (i * sizeof(u32)));
14991                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14992         }
14993         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14994
14995         if (to_device)
14996                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14997         else
14998                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14999
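        /* Poll the matching completion FIFO for up to 4 ms
         * (40 iterations x 100 us); fall through with -ENODEV if the
         * descriptor never completes.
         */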
15000         ret = -ENODEV;
15001         for (i = 0; i < 40; i++) {
15002                 u32 val;
15003
15004                 if (to_device)
15005                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15006                 else
15007                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15008                 if ((val & 0xffff) == sram_dma_descs) {
15009                         ret = 0;
15010                         break;
15011                 }
15012
15013                 udelay(100);
15014         }
15015
15016         return ret;
15017 }
15018
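/* 0x2000 bytes = 8 KiB, i.e. 2048 32-bit words per DMA test pass. */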
15019 #define TEST_BUFFER_SIZE        0x2000
15020
15021 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15022         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15023         { },
15024 };
15025
15026 static int __devinit tg3_test_dma(struct tg3 *tp)
15027 {
15028         dma_addr_t buf_dma;
15029         u32 *buf, saved_dma_rwctrl;
15030         int ret = 0;
15031
15032         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15033                                  &buf_dma, GFP_KERNEL);
15034         if (!buf) {
15035                 ret = -ENOMEM;
15036                 goto out_nofree;
15037         }
15038
15039         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15040                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15041
15042         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15043
15044         if (tg3_flag(tp, 57765_PLUS))
15045                 goto out;
15046
15047         if (tg3_flag(tp, PCI_EXPRESS)) {
15048                 /* DMA read watermark not used on PCIE */
15049                 tp->dma_rwctrl |= 0x00180000;
15050         } else if (!tg3_flag(tp, PCIX_MODE)) {
15051                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15052                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15053                         tp->dma_rwctrl |= 0x003f0000;
15054                 else
15055                         tp->dma_rwctrl |= 0x003f000f;
15056         } else {
15057                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15058                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15059                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15060                         u32 read_water = 0x7;
15061
15062                         /* If the 5704 is behind the EPB bridge, we can
15063                          * do the less restrictive ONE_DMA workaround for
15064                          * better performance.
15065                          */
15066                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15067                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15068                                 tp->dma_rwctrl |= 0x8000;
15069                         else if (ccval == 0x6 || ccval == 0x7)
15070                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15071
15072                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15073                                 read_water = 4;
15074                         /* Set bit 23 to enable PCIX hw bug fix */
15075                         tp->dma_rwctrl |=
15076                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15077                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15078                                 (1 << 23);
15079                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15080                         /* 5780 always in PCIX mode */
15081                         tp->dma_rwctrl |= 0x00144000;
15082                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15083                         /* 5714 always in PCIX mode */
15084                         tp->dma_rwctrl |= 0x00148000;
15085                 } else {
15086                         tp->dma_rwctrl |= 0x001b000f;
15087                 }
15088         }
15089
15090         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15091             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15092                 tp->dma_rwctrl &= 0xfffffff0;
15093
15094         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15095             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15096                 /* Remove this if it causes problems for some boards. */
15097                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15098
15099                 /* On 5700/5701 chips, we need to set this bit.
15100                  * Otherwise the chip will issue cacheline transactions
15101                  * to streamable DMA memory without all of the byte
15102                  * enables turned on.  This is an error on several
15103                  * RISC PCI controllers, in particular sparc64.
15104                  *
15105                  * On 5703/5704 chips, this bit has been reassigned
15106                  * a different meaning.  In particular, it is used
15107                  * on those chips to enable a PCI-X workaround.
15108                  */
15109                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15110         }
15111
15112         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15113
15114 #if 0
15115         /* Unneeded, already done by tg3_get_invariants.  */
15116         tg3_switch_clocks(tp);
15117 #endif
15118
15119         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15120             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15121                 goto out;
15122
15123         /* It is best to perform the DMA test with the maximum write burst
15124          * size to expose the 5700/5701 write DMA bug.
15125          */
15126         saved_dma_rwctrl = tp->dma_rwctrl;
15127         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15128         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15129
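        /* Write a known pattern to the chip, read it back, and verify
         * it.  On a mismatch, drop to a 16-byte write boundary once and
         * retry; a mismatch at the 16-byte boundary is a hard failure.
         */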
15130         while (1) {
15131                 u32 *p = buf, i;
15132
15133                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15134                         p[i] = i;
15135
15136                 /* Send the buffer to the chip. */
15137                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15138                 if (ret) {
15139                         dev_err(&tp->pdev->dev,
15140                                 "%s: Buffer write failed. err = %d\n",
15141                                 __func__, ret);
15142                         break;
15143                 }
15144
15145 #if 0
15146                 /* Validate that the data reached card RAM correctly. */
15147                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15148                         u32 val;
15149                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15150                         if (le32_to_cpu(val) != p[i]) {
15151                                 dev_err(&tp->pdev->dev,
15152                                         "%s: Buffer corrupted on device! "
15153                                         "(%d != %d)\n", __func__, val, i);
15154                                 /* ret = -ENODEV here? */
15155                         }
15156                         p[i] = 0;
15157                 }
15158 #endif
15159                 /* Now read it back. */
15160                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15161                 if (ret) {
15162                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15163                                 "err = %d\n", __func__, ret);
15164                         break;
15165                 }
15166
15167                 /* Verify it. */
15168                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15169                         if (p[i] == i)
15170                                 continue;
15171
15172                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15173                             DMA_RWCTRL_WRITE_BNDRY_16) {
15174                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15175                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15176                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15177                                 break;
15178                         } else {
15179                                 dev_err(&tp->pdev->dev,
15180                                         "%s: Buffer corrupted on read back! "
15181                                         "(%d != %d)\n", __func__, p[i], i);
15182                                 ret = -ENODEV;
15183                                 goto out;
15184                         }
15185                 }
15186
15187                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15188                         /* Success. */
15189                         ret = 0;
15190                         break;
15191                 }
15192         }
15193         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15194             DMA_RWCTRL_WRITE_BNDRY_16) {
15195                 /* DMA test passed without adjusting DMA boundary;
15196                  * now look for chipsets that are known to expose the
15197                  * DMA bug without failing the test.
15198                  */
15199                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15200                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15201                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15202                 } else {
15203                         /* Safe to use the calculated DMA boundary. */
15204                         tp->dma_rwctrl = saved_dma_rwctrl;
15205                 }
15206
15207                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15208         }
15209
15210 out:
15211         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15212 out_nofree:
15213         return ret;
15214 }
15215
15216 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15217 {
15218         if (tg3_flag(tp, 57765_PLUS)) {
15219                 tp->bufmgr_config.mbuf_read_dma_low_water =
15220                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15221                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15222                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15223                 tp->bufmgr_config.mbuf_high_water =
15224                         DEFAULT_MB_HIGH_WATER_57765;
15225
15226                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15227                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15228                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15229                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15230                 tp->bufmgr_config.mbuf_high_water_jumbo =
15231                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15232         } else if (tg3_flag(tp, 5705_PLUS)) {
15233                 tp->bufmgr_config.mbuf_read_dma_low_water =
15234                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15235                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15236                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15237                 tp->bufmgr_config.mbuf_high_water =
15238                         DEFAULT_MB_HIGH_WATER_5705;
15239                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15240                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15241                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15242                         tp->bufmgr_config.mbuf_high_water =
15243                                 DEFAULT_MB_HIGH_WATER_5906;
15244                 }
15245
15246                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15247                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15248                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15249                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15250                 tp->bufmgr_config.mbuf_high_water_jumbo =
15251                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15252         } else {
15253                 tp->bufmgr_config.mbuf_read_dma_low_water =
15254                         DEFAULT_MB_RDMA_LOW_WATER;
15255                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15256                         DEFAULT_MB_MACRX_LOW_WATER;
15257                 tp->bufmgr_config.mbuf_high_water =
15258                         DEFAULT_MB_HIGH_WATER;
15259
15260                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15261                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15262                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15263                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15264                 tp->bufmgr_config.mbuf_high_water_jumbo =
15265                         DEFAULT_MB_HIGH_WATER_JUMBO;
15266         }
15267
15268         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15269         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15270 }
15271
15272 static char * __devinit tg3_phy_string(struct tg3 *tp)
15273 {
15274         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15275         case TG3_PHY_ID_BCM5400:        return "5400";
15276         case TG3_PHY_ID_BCM5401:        return "5401";
15277         case TG3_PHY_ID_BCM5411:        return "5411";
15278         case TG3_PHY_ID_BCM5701:        return "5701";
15279         case TG3_PHY_ID_BCM5703:        return "5703";
15280         case TG3_PHY_ID_BCM5704:        return "5704";
15281         case TG3_PHY_ID_BCM5705:        return "5705";
15282         case TG3_PHY_ID_BCM5750:        return "5750";
15283         case TG3_PHY_ID_BCM5752:        return "5752";
15284         case TG3_PHY_ID_BCM5714:        return "5714";
15285         case TG3_PHY_ID_BCM5780:        return "5780";
15286         case TG3_PHY_ID_BCM5755:        return "5755";
15287         case TG3_PHY_ID_BCM5787:        return "5787";
15288         case TG3_PHY_ID_BCM5784:        return "5784";
15289         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15290         case TG3_PHY_ID_BCM5906:        return "5906";
15291         case TG3_PHY_ID_BCM5761:        return "5761";
15292         case TG3_PHY_ID_BCM5718C:       return "5718C";
15293         case TG3_PHY_ID_BCM5718S:       return "5718S";
15294         case TG3_PHY_ID_BCM57765:       return "57765";
15295         case TG3_PHY_ID_BCM5719C:       return "5719C";
15296         case TG3_PHY_ID_BCM5720C:       return "5720C";
15297         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15298         case 0:                 return "serdes";
15299         default:                return "unknown";
15300         }
15301 }
15302
15303 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15304 {
15305         if (tg3_flag(tp, PCI_EXPRESS)) {
15306                 strcpy(str, "PCI Express");
15307                 return str;
15308         } else if (tg3_flag(tp, PCIX_MODE)) {
15309                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15310
15311                 strcpy(str, "PCIX:");
15312
15313                 if ((clock_ctrl == 7) ||
15314                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15315                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15316                         strcat(str, "133MHz");
15317                 else if (clock_ctrl == 0)
15318                         strcat(str, "33MHz");
15319                 else if (clock_ctrl == 2)
15320                         strcat(str, "50MHz");
15321                 else if (clock_ctrl == 4)
15322                         strcat(str, "66MHz");
15323                 else if (clock_ctrl == 6)
15324                         strcat(str, "100MHz");
15325         } else {
15326                 strcpy(str, "PCI:");
15327                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15328                         strcat(str, "66MHz");
15329                 else
15330                         strcat(str, "33MHz");
15331         }
15332         if (tg3_flag(tp, PCI_32BIT))
15333                 strcat(str, ":32-bit");
15334         else
15335                 strcat(str, ":64-bit");
15336         return str;
15337 }
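
/*
 * Example outputs from tg3_bus_string() above:
 * "PCI Express", "PCIX:133MHz:64-bit", "PCI:66MHz:32-bit".
 */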
15338
15339 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15340 {
15341         struct pci_dev *peer;
15342         unsigned int func, devnr = tp->pdev->devfn & ~7;
15343
15344         for (func = 0; func < 8; func++) {
15345                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15346                 if (peer && peer != tp->pdev)
15347                         break;
15348                 pci_dev_put(peer);
15349         }
15350         /* 5704 can be configured in single-port mode; set peer to
15351          * tp->pdev in that case.
15352          */
15353         if (!peer) {
15354                 peer = tp->pdev;
15355                 return peer;
15356         }
15357
15358         /*
15359          * We don't need to keep the refcount elevated; there's no way
15360          * to remove one half of this device without removing the other.
15361          */
15362         pci_dev_put(peer);
15363
15364         return peer;
15365 }
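
/*
 * Typical usage sketch (not compiled in); the peer is cached once at
 * probe time for dual-port parts:
 *
 *      tp->pdev_peer = tg3_find_peer(tp);
 */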
15366
15367 static void __devinit tg3_init_coal(struct tg3 *tp)
15368 {
15369         struct ethtool_coalesce *ec = &tp->coal;
15370
15371         memset(ec, 0, sizeof(*ec));
15372         ec->cmd = ETHTOOL_GCOALESCE;
15373         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15374         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15375         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15376         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15377         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15378         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15379         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15380         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15381         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15382
15383         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15384                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15385                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15386                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15387                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15388                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15389         }
15390
15391         if (tg3_flag(tp, 5705_PLUS)) {
15392                 ec->rx_coalesce_usecs_irq = 0;
15393                 ec->tx_coalesce_usecs_irq = 0;
15394                 ec->stats_block_coalesce_usecs = 0;
15395         }
15396 }
15397
15398 static const struct net_device_ops tg3_netdev_ops = {
15399         .ndo_open               = tg3_open,
15400         .ndo_stop               = tg3_close,
15401         .ndo_start_xmit         = tg3_start_xmit,
15402         .ndo_get_stats64        = tg3_get_stats64,
15403         .ndo_validate_addr      = eth_validate_addr,
15404         .ndo_set_rx_mode        = tg3_set_rx_mode,
15405         .ndo_set_mac_address    = tg3_set_mac_addr,
15406         .ndo_do_ioctl           = tg3_ioctl,
15407         .ndo_tx_timeout         = tg3_tx_timeout,
15408         .ndo_change_mtu         = tg3_change_mtu,
15409         .ndo_fix_features       = tg3_fix_features,
15410         .ndo_set_features       = tg3_set_features,
15411 #ifdef CONFIG_NET_POLL_CONTROLLER
15412         .ndo_poll_controller    = tg3_poll_controller,
15413 #endif
15414 };
15415
15416 static int __devinit tg3_init_one(struct pci_dev *pdev,
15417                                   const struct pci_device_id *ent)
15418 {
15419         struct net_device *dev;
15420         struct tg3 *tp;
15421         int i, err, pm_cap;
15422         u32 sndmbx, rcvmbx, intmbx;
15423         char str[40];
15424         u64 dma_mask, persist_dma_mask;
15425         u32 features = 0;
15426
15427         printk_once(KERN_INFO "%s\n", version);
15428
15429         err = pci_enable_device(pdev);
15430         if (err) {
15431                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15432                 return err;
15433         }
15434
15435         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15436         if (err) {
15437                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15438                 goto err_out_disable_pdev;
15439         }
15440
15441         pci_set_master(pdev);
15442
15443         /* Find power-management capability. */
15444         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15445         if (pm_cap == 0) {
15446                 dev_err(&pdev->dev,
15447                         "Cannot find Power Management capability, aborting\n");
15448                 err = -EIO;
15449                 goto err_out_free_res;
15450         }
15451
15452         err = pci_set_power_state(pdev, PCI_D0);
15453         if (err) {
15454                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15455                 goto err_out_free_res;
15456         }
15457
15458         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15459         if (!dev) {
15460                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15461                 err = -ENOMEM;
15462                 goto err_out_power_down;
15463         }
15464
15465         SET_NETDEV_DEV(dev, &pdev->dev);
15466
15467         tp = netdev_priv(dev);
15468         tp->pdev = pdev;
15469         tp->dev = dev;
15470         tp->pm_cap = pm_cap;
15471         tp->rx_mode = TG3_DEF_RX_MODE;
15472         tp->tx_mode = TG3_DEF_TX_MODE;
15473         tp->irq_sync = 1;
15474
15475         if (tg3_debug > 0)
15476                 tp->msg_enable = tg3_debug;
15477         else
15478                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15479
15480         /* The word/byte swap controls here control register access byte
15481          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15482          * setting below.
15483          */
15484         tp->misc_host_ctrl =
15485                 MISC_HOST_CTRL_MASK_PCI_INT |
15486                 MISC_HOST_CTRL_WORD_SWAP |
15487                 MISC_HOST_CTRL_INDIR_ACCESS |
15488                 MISC_HOST_CTRL_PCISTATE_RW;
15489
15490         /* The NONFRM (non-frame) byte/word swap controls take effect
15491          * on descriptor entries, i.e. anything which isn't packet data.
15492          *
15493          * The StrongARM chips on the board (one for tx, one for rx)
15494          * are running in big-endian mode.
15495          */
15496         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15497                         GRC_MODE_WSWAP_NONFRM_DATA);
15498 #ifdef __BIG_ENDIAN
15499         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15500 #endif
15501         spin_lock_init(&tp->lock);
15502         spin_lock_init(&tp->indirect_lock);
15503         INIT_WORK(&tp->reset_task, tg3_reset_task);
15504
15505         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15506         if (!tp->regs) {
15507                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15508                 err = -ENOMEM;
15509                 goto err_out_free_dev;
15510         }
15511
15512         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15513             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15514             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15515             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15516             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15517             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15518             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15519             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15520                 tg3_flag_set(tp, ENABLE_APE);
15521                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15522                 if (!tp->aperegs) {
15523                         dev_err(&pdev->dev,
15524                                 "Cannot map APE registers, aborting\n");
15525                         err = -ENOMEM;
15526                         goto err_out_iounmap;
15527                 }
15528         }
15529
15530         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15531         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15532
15533         dev->ethtool_ops = &tg3_ethtool_ops;
15534         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15535         dev->netdev_ops = &tg3_netdev_ops;
15536         dev->irq = pdev->irq;
15537
15538         err = tg3_get_invariants(tp);
15539         if (err) {
15540                 dev_err(&pdev->dev,
15541                         "Problem fetching invariants of chip, aborting\n");
15542                 goto err_out_apeunmap;
15543         }
15544
15545         /* The EPB bridge inside the 5714, 5715, and 5780, and any
15546          * device behind the EPB, cannot support DMA addresses > 40-bit.
15547          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15548          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15549          * do DMA address check in tg3_start_xmit().
15550          */
15551         if (tg3_flag(tp, IS_5788))
15552                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15553         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15554                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15555 #ifdef CONFIG_HIGHMEM
15556                 dma_mask = DMA_BIT_MASK(64);
15557 #endif
15558         } else
15559                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15560
15561         /* Configure DMA attributes. */
15562         if (dma_mask > DMA_BIT_MASK(32)) {
15563                 err = pci_set_dma_mask(pdev, dma_mask);
15564                 if (!err) {
15565                         features |= NETIF_F_HIGHDMA;
15566                         err = pci_set_consistent_dma_mask(pdev,
15567                                                           persist_dma_mask);
15568                         if (err < 0) {
15569                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15570                                         "DMA for consistent allocations\n");
15571                                 goto err_out_apeunmap;
15572                         }
15573                 }
15574         }
15575         if (err || dma_mask == DMA_BIT_MASK(32)) {
15576                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15577                 if (err) {
15578                         dev_err(&pdev->dev,
15579                                 "No usable DMA configuration, aborting\n");
15580                         goto err_out_apeunmap;
15581                 }
15582         }
15583
15584         tg3_init_bufmgr_config(tp);
15585
15586         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15587
15588         /* 5700 B0 chips do not support checksumming correctly due
15589          * to hardware bugs.
15590          */
15591         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15592                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15593
15594                 if (tg3_flag(tp, 5755_PLUS))
15595                         features |= NETIF_F_IPV6_CSUM;
15596         }
15597
15598         /* TSO is on by default on chips that support hardware TSO.
15599          * Firmware TSO on older chips gives lower performance, so it
15600          * is off by default, but can be enabled using ethtool.
15601          */
15602         if ((tg3_flag(tp, HW_TSO_1) ||
15603              tg3_flag(tp, HW_TSO_2) ||
15604              tg3_flag(tp, HW_TSO_3)) &&
15605             (features & NETIF_F_IP_CSUM))
15606                 features |= NETIF_F_TSO;
15607         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15608                 if (features & NETIF_F_IPV6_CSUM)
15609                         features |= NETIF_F_TSO6;
15610                 if (tg3_flag(tp, HW_TSO_3) ||
15611                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15612                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15613                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15614                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15615                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15616                         features |= NETIF_F_TSO_ECN;
15617         }
15618
15619         dev->features |= features;
15620         dev->vlan_features |= features;
15621
15622         /*
15623          * Add loopback capability only for a subset of devices that support
15624          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15625          * loopback for the remaining devices.
15626          */
15627         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15628             !tg3_flag(tp, CPMU_PRESENT))
15629                 /* Add the loopback capability */
15630                 features |= NETIF_F_LOOPBACK;
15631
15632         dev->hw_features |= features;
15633
15634         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15635             !tg3_flag(tp, TSO_CAPABLE) &&
15636             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15637                 tg3_flag_set(tp, MAX_RXPEND_64);
15638                 tp->rx_pending = 63;
15639         }
15640
15641         err = tg3_get_device_address(tp);
15642         if (err) {
15643                 dev_err(&pdev->dev,
15644                         "Could not obtain valid ethernet address, aborting\n");
15645                 goto err_out_apeunmap;
15646         }
15647
15648         /*
15649          * Reset the chip in case the UNDI or EFI driver did not shut it
15650          * down; the DMA self test will enable WDMAC and we'll see (spurious)
15651          * pending DMA on the PCI bus at that point.
15652          */
15653         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15654             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15655                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15656                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15657         }
15658
15659         err = tg3_test_dma(tp);
15660         if (err) {
15661                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15662                 goto err_out_apeunmap;
15663         }
15664
15665         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15666         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15667         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15668         for (i = 0; i < tp->irq_max; i++) {
15669                 struct tg3_napi *tnapi = &tp->napi[i];
15670
15671                 tnapi->tp = tp;
15672                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15673
15674                 tnapi->int_mbox = intmbx;
15675                 if (i <= 4)
15676                         intmbx += 0x8;
15677                 else
15678                         intmbx += 0x4;
15679
15680                 tnapi->consmbox = rcvmbx;
15681                 tnapi->prodmbox = sndmbx;
15682
15683                 if (i)
15684                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15685                 else
15686                         tnapi->coal_now = HOSTCC_MODE_NOW;
15687
15688                 if (!tg3_flag(tp, SUPPORT_MSIX))
15689                         break;
15690
15691                 /*
15692                  * If we support MSIX, we'll be using RSS.  If we're using
15693                  * RSS, the first vector only handles link interrupts and the
15694                  * remaining vectors handle rx and tx interrupts.  Reuse the
15695          * mailbox values for the next iteration.  The values we set up
15696                  * above are still useful for the single vectored mode.
15697                  */
15698                 if (!i)
15699                         continue;
15700
15701                 rcvmbx += 0x8;
15702
15703                 if (sndmbx & 0x4)
15704                         sndmbx -= 0x4;
15705                 else
15706                         sndmbx += 0xc;
15707         }
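        /* Note, derived from the loop above: the i == 0 pass continues
         * before the rcvmbx/sndmbx adjustments, so vector 1 reuses
         * vector 0's rx/tx mailbox values; those mailboxes advance only
         * from vector 2 onward.
         */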
15708
15709         tg3_init_coal(tp);
15710
15711         pci_set_drvdata(pdev, dev);
15712
15713         if (tg3_flag(tp, 5717_PLUS)) {
15714                 /* Resume from a low-power mode */
15715                 tg3_frob_aux_power(tp, false);
15716         }
15717
15718         err = register_netdev(dev);
15719         if (err) {
15720                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15721                 goto err_out_apeunmap;
15722         }
15723
15724         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15725                     tp->board_part_number,
15726                     tp->pci_chip_rev_id,
15727                     tg3_bus_string(tp, str),
15728                     dev->dev_addr);
15729
15730         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15731                 struct phy_device *phydev;
15732                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15733                 netdev_info(dev,
15734                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15735                             phydev->drv->name, dev_name(&phydev->dev));
15736         } else {
15737                 char *ethtype;
15738
15739                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15740                         ethtype = "10/100Base-TX";
15741                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15742                         ethtype = "1000Base-SX";
15743                 else
15744                         ethtype = "10/100/1000Base-T";
15745
15746                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15747                             "(WireSpeed[%d], EEE[%d])\n",
15748                             tg3_phy_string(tp), ethtype,
15749                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15750                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15751         }
15752
15753         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15754                     (dev->features & NETIF_F_RXCSUM) != 0,
15755                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15756                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15757                     tg3_flag(tp, ENABLE_ASF) != 0,
15758                     tg3_flag(tp, TSO_CAPABLE) != 0);
15759         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15760                     tp->dma_rwctrl,
15761                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15762                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15763
15764         pci_save_state(pdev);
15765
15766         return 0;
15767
15768 err_out_apeunmap:
15769         if (tp->aperegs) {
15770                 iounmap(tp->aperegs);
15771                 tp->aperegs = NULL;
15772         }
15773
15774 err_out_iounmap:
15775         if (tp->regs) {
15776                 iounmap(tp->regs);
15777                 tp->regs = NULL;
15778         }
15779
15780 err_out_free_dev:
15781         free_netdev(dev);
15782
15783 err_out_power_down:
15784         pci_set_power_state(pdev, PCI_D3hot);
15785
15786 err_out_free_res:
15787         pci_release_regions(pdev);
15788
15789 err_out_disable_pdev:
15790         pci_disable_device(pdev);
15791         pci_set_drvdata(pdev, NULL);
15792         return err;
15793 }
15794
15795 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15796 {
15797         struct net_device *dev = pci_get_drvdata(pdev);
15798
15799         if (dev) {
15800                 struct tg3 *tp = netdev_priv(dev);
15801
15802                 if (tp->fw)
15803                         release_firmware(tp->fw);
15804
15805                 tg3_reset_task_cancel(tp);
15806
15807                 if (tg3_flag(tp, USE_PHYLIB)) {
15808                         tg3_phy_fini(tp);
15809                         tg3_mdio_fini(tp);
15810                 }
15811
15812                 unregister_netdev(dev);
15813                 if (tp->aperegs) {
15814                         iounmap(tp->aperegs);
15815                         tp->aperegs = NULL;
15816                 }
15817                 if (tp->regs) {
15818                         iounmap(tp->regs);
15819                         tp->regs = NULL;
15820                 }
15821                 free_netdev(dev);
15822                 pci_release_regions(pdev);
15823                 pci_disable_device(pdev);
15824                 pci_set_drvdata(pdev, NULL);
15825         }
15826 }
15827
15828 #ifdef CONFIG_PM_SLEEP
15829 static int tg3_suspend(struct device *device)
15830 {
15831         struct pci_dev *pdev = to_pci_dev(device);
15832         struct net_device *dev = pci_get_drvdata(pdev);
15833         struct tg3 *tp = netdev_priv(dev);
15834         int err;
15835
15836         if (!netif_running(dev))
15837                 return 0;
15838
15839         tg3_reset_task_cancel(tp);
15840         tg3_phy_stop(tp);
15841         tg3_netif_stop(tp);
15842
15843         del_timer_sync(&tp->timer);
15844
15845         tg3_full_lock(tp, 1);
15846         tg3_disable_ints(tp);
15847         tg3_full_unlock(tp);
15848
15849         netif_device_detach(dev);
15850
15851         tg3_full_lock(tp, 0);
15852         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15853         tg3_flag_clear(tp, INIT_COMPLETE);
15854         tg3_full_unlock(tp);
15855
15856         err = tg3_power_down_prepare(tp);
15857         if (err) {
15858                 int err2;
15859
15860                 tg3_full_lock(tp, 0);
15861
15862                 tg3_flag_set(tp, INIT_COMPLETE);
15863                 err2 = tg3_restart_hw(tp, 1);
15864                 if (err2)
15865                         goto out;
15866
15867                 tp->timer.expires = jiffies + tp->timer_offset;
15868                 add_timer(&tp->timer);
15869
15870                 netif_device_attach(dev);
15871                 tg3_netif_start(tp);
15872
15873 out:
15874                 tg3_full_unlock(tp);
15875
15876                 if (!err2)
15877                         tg3_phy_start(tp);
15878         }
15879
15880         return err;
15881 }
15882
15883 static int tg3_resume(struct device *device)
15884 {
15885         struct pci_dev *pdev = to_pci_dev(device);
15886         struct net_device *dev = pci_get_drvdata(pdev);
15887         struct tg3 *tp = netdev_priv(dev);
15888         int err;
15889
15890         if (!netif_running(dev))
15891                 return 0;
15892
15893         netif_device_attach(dev);
15894
15895         tg3_full_lock(tp, 0);
15896
15897         tg3_flag_set(tp, INIT_COMPLETE);
15898         err = tg3_restart_hw(tp, 1);
15899         if (err)
15900                 goto out;
15901
15902         tp->timer.expires = jiffies + tp->timer_offset;
15903         add_timer(&tp->timer);
15904
15905         tg3_netif_start(tp);
15906
15907 out:
15908         tg3_full_unlock(tp);
15909
15910         if (!err)
15911                 tg3_phy_start(tp);
15912
15913         return err;
15914 }
15915
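/*
 * SIMPLE_DEV_PM_OPS fills in both the suspend-to-RAM and hibernation
 * (freeze/thaw/poweroff/restore) slots of the dev_pm_ops with the
 * suspend/resume pair above.
 */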
15916 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15917 #define TG3_PM_OPS (&tg3_pm_ops)
15918
15919 #else
15920
15921 #define TG3_PM_OPS NULL
15922
15923 #endif /* CONFIG_PM_SLEEP */
15924
15925 /**
15926  * tg3_io_error_detected - called when PCI error is detected
15927  * @pdev: Pointer to PCI device
15928  * @state: The current pci connection state
15929  *
15930  * This function is called after a PCI bus error affecting
15931  * this device has been detected.
15932  */
15933 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15934                                               pci_channel_state_t state)
15935 {
15936         struct net_device *netdev = pci_get_drvdata(pdev);
15937         struct tg3 *tp = netdev_priv(netdev);
15938         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15939
15940         netdev_info(netdev, "PCI I/O error detected\n");
15941
15942         rtnl_lock();
15943
15944         if (!netif_running(netdev))
15945                 goto done;
15946
15947         tg3_phy_stop(tp);
15948
15949         tg3_netif_stop(tp);
15950
15951         del_timer_sync(&tp->timer);
15952
15953         /* Want to make sure that the reset task doesn't run */
15954         tg3_reset_task_cancel(tp);
15955         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15956
15957         netif_device_detach(netdev);
15958
15959         /* Clean up software state, even if MMIO is blocked */
15960         tg3_full_lock(tp, 0);
15961         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15962         tg3_full_unlock(tp);
15963
15964 done:
15965         if (state == pci_channel_io_perm_failure)
15966                 err = PCI_ERS_RESULT_DISCONNECT;
15967         else
15968                 pci_disable_device(pdev);
15969
15970         rtnl_unlock();
15971
15972         return err;
15973 }
15974
15975 /**
15976  * tg3_io_slot_reset - called after the pci bus has been reset.
15977  * @pdev: Pointer to PCI device
15978  *
15979  * Restart the card from scratch, as if from a cold boot.
15980  * At this point, the card has experienced a hard reset,
15981  * followed by fixups by BIOS, and has its config space
15982  * set up identically to what it was at cold boot.
15983  */
15984 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15985 {
15986         struct net_device *netdev = pci_get_drvdata(pdev);
15987         struct tg3 *tp = netdev_priv(netdev);
15988         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15989         int err;
15990
15991         rtnl_lock();
15992
15993         if (pci_enable_device(pdev)) {
15994                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15995                 goto done;
15996         }
15997
15998         pci_set_master(pdev);
15999         pci_restore_state(pdev);
16000         pci_save_state(pdev);
16001
16002         if (!netif_running(netdev)) {
16003                 rc = PCI_ERS_RESULT_RECOVERED;
16004                 goto done;
16005         }
16006
16007         err = tg3_power_up(tp);
16008         if (err)
16009                 goto done;
16010
16011         rc = PCI_ERS_RESULT_RECOVERED;
16012
16013 done:
16014         rtnl_unlock();
16015
16016         return rc;
16017 }
16018
16019 /**
16020  * tg3_io_resume - called when traffic can start flowing again.
16021  * @pdev: Pointer to PCI device
16022  *
16023  * This callback is called when the error recovery driver tells
16024  * us that it's OK to resume normal operation.
16025  */
16026 static void tg3_io_resume(struct pci_dev *pdev)
16027 {
16028         struct net_device *netdev = pci_get_drvdata(pdev);
16029         struct tg3 *tp = netdev_priv(netdev);
16030         int err;
16031
16032         rtnl_lock();
16033
16034         if (!netif_running(netdev))
16035                 goto done;
16036
16037         tg3_full_lock(tp, 0);
16038         tg3_flag_set(tp, INIT_COMPLETE);
16039         err = tg3_restart_hw(tp, 1);
16040         tg3_full_unlock(tp);
16041         if (err) {
16042                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16043                 goto done;
16044         }
16045
16046         netif_device_attach(netdev);
16047
16048         tp->timer.expires = jiffies + tp->timer_offset;
16049         add_timer(&tp->timer);
16050
16051         tg3_netif_start(tp);
16052
16053         tg3_phy_start(tp);
16054
16055 done:
16056         rtnl_unlock();
16057 }
16058
16059 static struct pci_error_handlers tg3_err_handler = {
16060         .error_detected = tg3_io_error_detected,
16061         .slot_reset     = tg3_io_slot_reset,
16062         .resume         = tg3_io_resume
16063 };
16064
16065 static struct pci_driver tg3_driver = {
16066         .name           = DRV_MODULE_NAME,
16067         .id_table       = tg3_pci_tbl,
16068         .probe          = tg3_init_one,
16069         .remove         = __devexit_p(tg3_remove_one),
16070         .err_handler    = &tg3_err_handler,
16071         .driver.pm      = TG3_PM_OPS,
16072 };
16073
16074 static int __init tg3_init(void)
16075 {
16076         return pci_register_driver(&tg3_driver);
16077 }
16078
16079 static void __exit tg3_cleanup(void)
16080 {
16081         pci_unregister_driver(&tg3_driver);
16082 }
16083
16084 module_init(tg3_init);
16085 module_exit(tg3_cleanup);