tg3: Skip powering down function 0 on certain serdes devices
[pandora-kernel.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70         return test_bit(flag, bits);
71 }
72
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75         set_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80         clear_bit(flag, bits);
81 }
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     121
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "November 2, 2011"
96
97 #define RESET_KIND_SHUTDOWN     0
98 #define RESET_KIND_INIT         1
99 #define RESET_KIND_SUSPEND      2
100
101 #define TG3_DEF_RX_MODE         0
102 #define TG3_DEF_TX_MODE         0
103 #define TG3_DEF_MSG_ENABLE        \
104         (NETIF_MSG_DRV          | \
105          NETIF_MSG_PROBE        | \
106          NETIF_MSG_LINK         | \
107          NETIF_MSG_TIMER        | \
108          NETIF_MSG_IFDOWN       | \
109          NETIF_MSG_IFUP         | \
110          NETIF_MSG_RX_ERR       | \
111          NETIF_MSG_TX_ERR)
112
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
114
115 /* length of time before we decide the hardware is borked,
116  * and dev->tx_timeout() should be called to fix the problem
117  */
118
119 #define TG3_TX_TIMEOUT                  (5 * HZ)
120
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU                     60
123 #define TG3_MAX_MTU(tp) \
124         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127  * You can't change the ring sizes, but you can change where you place
128  * them in the NIC onboard memory.
129  */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING         200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
138 #define TG3_RSS_INDIR_TBL_SIZE          128
139
140 /* Do not place this n-ring entries value into the tp struct itself,
141  * we really want to expose these constants to GCC so that modulo et
142  * al.  operations are done with shifts and masks instead of with
143  * hw multiply/modulo instructions.  Another solution would be to
144  * replace things like '% foo' with '& (foo - 1)'.
145  */
146
147 #define TG3_TX_RING_SIZE                512
148 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
149
150 #define TG3_RX_STD_RING_BYTES(tp) \
151         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
152 #define TG3_RX_JMB_RING_BYTES(tp) \
153         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
154 #define TG3_RX_RCB_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
156 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
157                                  TG3_TX_RING_SIZE)
158 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
159
160 #define TG3_DMA_BYTE_ENAB               64
161
162 #define TG3_RX_STD_DMA_SZ               1536
163 #define TG3_RX_JMB_DMA_SZ               9046
164
165 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
166
167 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
168 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
169
170 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
171         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
172
173 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
174         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
175
176 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
177  * that are at least dword aligned when used in PCIX mode.  The driver
178  * works around this bug by double copying the packet.  This workaround
179  * is built into the normal double copy length check for efficiency.
180  *
181  * However, the double copy is only necessary on those architectures
182  * where unaligned memory accesses are inefficient.  For those architectures
183  * where unaligned memory accesses incur little penalty, we can reintegrate
184  * the 5701 in the normal rx path.  Doing so saves a device structure
185  * dereference by hardcoding the double copy threshold in place.
186  */
187 #define TG3_RX_COPY_THRESHOLD           256
188 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
189         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
190 #else
191         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
192 #endif
193
194 #if (NET_IP_ALIGN != 0)
195 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
196 #else
197 #define TG3_RX_OFFSET(tp)       0
198 #endif
199
200 /* minimum number of free TX descriptors required to wake up TX process */
201 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
202 #define TG3_TX_BD_DMA_MAX               4096
203
204 #define TG3_RAW_IP_ALIGN 2
205
206 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
207
208 #define FIRMWARE_TG3            "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
211
/* Version banner printed once at driver load / probe time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

/* Bitmap of NETIF_MSG_* categories to enable; overridable at load time. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
/* PCI vendor/device IDs of every adapter this driver claims. */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}	/* sentinel */
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
/* Names reported for ethtool -S, in the exact order the statistics are
 * laid out; keep this table in sync with the stats gathering code.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
396
397 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
398
399
/* Names reported for ethtool self-test results; order must match the
 * order in which the self-test routines run.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};
412
413 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
414
415
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
417 {
418         writel(val, tp->regs + off);
419 }
420
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
422 {
423         return readl(tp->regs + off);
424 }
425
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
427 {
428         writel(val, tp->aperegs + off);
429 }
430
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
432 {
433         return readl(tp->aperegs + off);
434 }
435
436 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
437 {
438         unsigned long flags;
439
440         spin_lock_irqsave(&tp->indirect_lock, flags);
441         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443         spin_unlock_irqrestore(&tp->indirect_lock, flags);
444 }
445
446 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
447 {
448         writel(val, tp->regs + off);
449         readl(tp->regs + off);
450 }
451
452 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
453 {
454         unsigned long flags;
455         u32 val;
456
457         spin_lock_irqsave(&tp->indirect_lock, flags);
458         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460         spin_unlock_irqrestore(&tp->indirect_lock, flags);
461         return val;
462 }
463
/* Write a mailbox register while the chip is in indirect access mode.
 * Two mailboxes have dedicated PCI config-space shadow registers and
 * take a fast path; all others go through the shared indirect window
 * (mailbox space sits at offset 0x5600 in that window).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index: dedicated shadow register. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Standard RX producer index: dedicated shadow register. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
493
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
495 {
496         unsigned long flags;
497         u32 val;
498
499         spin_lock_irqsave(&tp->indirect_lock, flags);
500         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502         spin_unlock_irqrestore(&tp->indirect_lock, flags);
503         return val;
504 }
505
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
/* Write @val to register @off and make sure it reaches the chip.
 * Chips with posted-write hazards use the non-posted method installed
 * in tp->write32; otherwise do a posted write plus read-back flush.
 * @usec_wait is an extra settle delay required by some registers.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);	/* read-back flushes the write */
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
529
530 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
531 {
532         tp->write32_mbox(tp, off, val);
533         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534                 tp->read32_mbox(tp, off);
535 }
536
537 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
538 {
539         void __iomem *mbox = tp->regs + off;
540         writel(val, mbox);
541         if (tg3_flag(tp, TXD_MBOX_HWBUG))
542                 writel(val, mbox);
543         if (tg3_flag(tp, MBOX_WRITE_REORDER))
544                 readl(mbox);
545 }
546
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
548 {
549         return readl(tp->regs + off + GRCMBOX_BASE);
550 }
551
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
553 {
554         writel(val, tp->regs + off + GRCMBOX_BASE);
555 }
556
557 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
562
563 #define tw32(reg, val)                  tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)                       tp->read32(tp, reg)
567
/* Write @val into NIC SRAM at offset @off through the memory window.
 * On 5906, writes into the statistics block range are skipped
 * (hardware limitation — see the check below).
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Drive the memory window through PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Drive the memory window through MMIO registers. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
592
/* Read a 32-bit word of NIC SRAM at offset @off into *@val through the
 * memory window.  On 5906, reads in the statistics block range are
 * skipped and *@val is forced to 0 (hardware limitation).
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Drive the memory window through PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Drive the memory window through MMIO registers. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
619
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622         int i;
623         u32 regbase, bit;
624
625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626                 regbase = TG3_APE_LOCK_GRANT;
627         else
628                 regbase = TG3_APE_PER_LOCK_GRANT;
629
630         /* Make sure the driver hasn't any stale locks. */
631         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632                 switch (i) {
633                 case TG3_APE_LOCK_PHY0:
634                 case TG3_APE_LOCK_PHY1:
635                 case TG3_APE_LOCK_PHY2:
636                 case TG3_APE_LOCK_PHY3:
637                         bit = APE_LOCK_GRANT_DRIVER;
638                         break;
639                 default:
640                         if (!tp->pci_fn)
641                                 bit = APE_LOCK_GRANT_DRIVER;
642                         else
643                                 bit = 1 << tp->pci_fn;
644                 }
645                 tg3_ape_write32(tp, regbase + 4 * i, bit);
646         }
647
648 }
649
/* Acquire an APE hardware lock.  Returns 0 on success (or if APE is
 * not enabled), -EBUSY if the grant is not observed within ~1 ms,
 * and -EINVAL for an unsupported @locknum.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock to take. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Function 0 requests via the driver bit; other PCI
		 * functions each use their own per-function bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy (shared) request/grant registers. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
702
/* Release an APE hardware lock previously taken with tg3_ape_lock(). */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Function 0 owns the driver bit; other PCI functions
		 * each own their per-function bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	/* 5761 uses the legacy (shared) grant register block. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* Writing our bit back to the grant register drops the lock. */
	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
732
/* Post @event to the APE firmware, if present and ready.  Polls up to
 * ~1 ms for any previous event to drain, queues the new event under
 * the APE MEM lock, then rings the APE doorbell.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	/* Bail out if the APE firmware segment signature is absent... */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	/* ...or the firmware is not yet ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Previous event consumed: queue ours while holding the
		 * MEM lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if our event was actually queued. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
772
/* Tell the APE firmware about a driver state transition (@kind is one
 * of the RESET_KIND_* constants) and send the matching APE event.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment signature/length, bump the
		 * init count and identify this driver to the APE.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* If WoL is armed, tell the APE so it keeps the link
		 * alive at a suitable speed; otherwise report unload.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
829
/* Mask PCI interrupt generation at the host controller and disable
 * every interrupt vector via its mailbox.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	/* Writing 1 to an interrupt mailbox disables that vector. */
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
839
/* Unmask PCI interrupts and re-enable every active interrupt vector.
 * Rebuilds tp->coal_now from the per-vector coalescing bits and forces
 * an initial interrupt (or a coalesce-now cycle) so a status update
 * that arrived while interrupts were off is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* In 1-shot MSI mode the mailbox is written a second time. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
870
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873         struct tg3 *tp = tnapi->tp;
874         struct tg3_hw_status *sblk = tnapi->hw_status;
875         unsigned int work_exists = 0;
876
877         /* check for phy events */
878         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879                 if (sblk->status & SD_STATUS_LINK_CHG)
880                         work_exists = 1;
881         }
882
883         /* check for TX work to do */
884         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
885                 work_exists = 1;
886
887         /* check for RX work to do */
888         if (tnapi->rx_rcb_prod_idx &&
889             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
890                 work_exists = 1;
891
892         return work_exists;
893 }
894
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Ack up to last_tag and re-arm this vector's mailbox. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
915
/* Normalize TG3PCI_CLOCK_CTRL to the standard core clock setting and
 * cache the result in tp->pci_clock_ctrl.  No-op on CPMU-equipped and
 * 5780-class chips.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN control bits and the low 5 bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Transition away from the 44 MHz core clock in two
		 * steps through ALTCLK.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
948
949 #define PHY_BUSY_LOOPS  5000
950
/* Read PHY register @reg over the MII management interface.
 *
 * Temporarily disables MAC auto-polling (restored before returning),
 * issues the read command, and busy-waits up to PHY_BUSY_LOOPS
 * iterations for completion.  On success stores the 16-bit value in
 * *@val and returns 0; returns -EBUSY on timeout (*@val stays 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with a manual MDIO transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the BUSY bit to clear, then re-read for the data. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
999
/* Write @val to PHY register @reg over the MII management interface.
 *
 * FET-style PHYs do not implement MII_CTRL1000/MII_TG3_AUX_CTRL, so
 * such writes are silently skipped (returns 0).  Otherwise mirrors
 * tg3_readphy(): auto-polling is suspended, the write command issued,
 * and completion busy-waited.  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling would race with a manual MDIO transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
1048
1049 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1050 {
1051         int err;
1052
1053         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1054         if (err)
1055                 goto done;
1056
1057         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1058         if (err)
1059                 goto done;
1060
1061         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1062                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1063         if (err)
1064                 goto done;
1065
1066         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1067
1068 done:
1069         return err;
1070 }
1071
1072 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1073 {
1074         int err;
1075
1076         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1077         if (err)
1078                 goto done;
1079
1080         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1081         if (err)
1082                 goto done;
1083
1084         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1085                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1086         if (err)
1087                 goto done;
1088
1089         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1090
1091 done:
1092         return err;
1093 }
1094
1095 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1096 {
1097         int err;
1098
1099         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1100         if (!err)
1101                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1102
1103         return err;
1104 }
1105
1106 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1107 {
1108         int err;
1109
1110         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1111         if (!err)
1112                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1113
1114         return err;
1115 }
1116
1117 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1118 {
1119         int err;
1120
1121         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1122                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1123                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1124         if (!err)
1125                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1126
1127         return err;
1128 }
1129
1130 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1131 {
1132         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1133                 set |= MII_TG3_AUXCTL_MISC_WREN;
1134
1135         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1136 }
1137
1138 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1139 {
1140         u32 val;
1141         int err;
1142
1143         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1144
1145         if (err)
1146                 return err;
1147         if (enable)
1148
1149                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1150         else
1151                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1152
1153         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1154                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1155
1156         return err;
1157 }
1158
/* Soft-reset the PHY through BMCR and poll (5000 x 10 us, ~50 ms max)
 * for the self-clearing BMCR_RESET bit.  Returns 0 on success, -EBUSY
 * on any MII access failure or on timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit is -1 here only when the loop ran to exhaustion. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1189
/* phylib mii_bus ->read callback.
 *
 * Returns the 16-bit register value on success, or -EIO on MII failure
 * (the errno is stored in a u32 but survives the implicit conversion
 * back to int, since valid register values fit in 16 bits).
 */
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
1204
1205 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1206 {
1207         struct tg3 *tp = bp->priv;
1208         u32 ret = 0;
1209
1210         spin_lock_bh(&tp->lock);
1211
1212         if (tg3_writephy(tp, reg, val))
1213                 ret = -EIO;
1214
1215         spin_unlock_bh(&tp->lock);
1216
1217         return ret;
1218 }
1219
/* phylib mii_bus ->reset callback; the tg3 bus needs no explicit reset. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1224
/* Configure the 5785 MAC-to-PHY interface (LED modes and RGMII
 * signalling) according to the attached PHY type.  Unknown PHYs are
 * left untouched.  Non-RGMII PHYs get a short early-out path; RGMII
 * PHYs additionally get in-band status and TX/RX mode programming
 * driven by the RGMII_* flags.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces only need LED modes and clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status: enable the full mask/enable set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Clear all RGMII mode bits, then re-enable per the ext in-band
	 * RX/TX flags.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1305
/* Disable MAC auto-polling of the PHY so manual MDIO transactions can
 * proceed, then reapply the 5785-specific MDIO configuration if the
 * MDIO bus has already been initialized.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1316
/* Set up PHY addressing and, when phylib is in use, allocate and
 * register the MDIO bus.
 *
 * 5717-plus parts address the PHY as (PCI function + 1), offset by a
 * further 7 when the strap/status register indicates a serdes device;
 * everything else uses TG3_PHY_MII_ADDR.  After registering the bus,
 * per-PHY dev_flags and interface mode are set from the PHY ID.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV when no
 * PHY (or PHY driver) is found, or the mdiobus_register() error code.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		/* 5717 A0 reports serdes via a CPMU strap instead of
		 * SG_DIG_STATUS.
		 */
		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY quirks: interface mode and broadcom PHY dev_flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1421
1422 static void tg3_mdio_fini(struct tg3 *tp)
1423 {
1424         if (tg3_flag(tp, MDIOBUS_INITED)) {
1425                 tg3_flag_clear(tp, MDIOBUS_INITED);
1426                 mdiobus_unregister(tp->mdio_bus);
1427                 mdiobus_free(tp->mdio_bus);
1428         }
1429 }
1430
1431 /* tp->lock is held. */
1432 static inline void tg3_generate_fw_event(struct tg3 *tp)
1433 {
1434         u32 val;
1435
1436         val = tr32(GRC_RX_CPU_EVENT);
1437         val |= GRC_RX_CPU_DRIVER_EVENT;
1438         tw32_f(GRC_RX_CPU_EVENT, val);
1439
1440         tp->last_event_jiffies = jiffies;
1441 }
1442
1443 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1444
/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the RX CPU firmware
 * to acknowledge the previously generated driver event, i.e. for
 * GRC_RX_CPU_DRIVER_EVENT to clear.  The wait is shortened by however
 * much time has already elapsed since the event was raised.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in ~8 us slices. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1471
/* tp->lock is held. */
/* Copy the current MII link registers (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000, PHYADDR) into the NIC SRAM command mailbox and
 * raise a firmware event so management firmware sees the link state.
 * Only applies to 5780-class devices with ASF enabled.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local and link-partner advertisement. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: gigabit control/status; zero for MII serdes PHYs. */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1518
/* tp->lock is held. */
/* Pause the ASF firmware (only on non-APE devices) by posting
 * FWCMD_NICDRV_PAUSE_FW and waiting for the RX CPU to acknowledge.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1534
/* tp->lock is held. */
/* Signal firmware that a reset of type @kind is about to happen.
 * Always writes the driver-alive magic; per-kind driver state is only
 * written under the new ASF handshake.  The APE is notified for INIT
 * and SUSPEND (SHUTDOWN is handled post-reset).
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
1567
/* tp->lock is held. */
/* Signal firmware that a reset of type @kind has completed.  Driver
 * state is only written under the new ASF handshake; the APE is
 * notified only for SHUTDOWN (INIT/SUSPEND are handled pre-reset).
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
1591
/* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF signalling: write the driver state
 * for @kind to the firmware mailbox when ASF is enabled.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
1617
/* Wait for boot firmware to finish initializing after a reset.
 *
 * 5906: poll VCPU_STATUS for up to 20 ms, returning -ENODEV on
 * timeout.  Other chips: poll the firmware mailbox (up to ~1 s) for
 * the inverted driver magic; a timeout there is NOT an error — some
 * Sun onboard parts ship without firmware — but is logged once.
 * Returns 0 in all non-5906-timeout cases.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1661
/* Log the current link state (up/down, speed, duplex, flow control,
 * EEE) — up-details gated by the link message level — and forward the
 * new state to management firmware via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
1689
1690 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1691 {
1692         u16 miireg;
1693
1694         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1695                 miireg = ADVERTISE_PAUSE_CAP;
1696         else if (flow_ctrl & FLOW_CTRL_TX)
1697                 miireg = ADVERTISE_PAUSE_ASYM;
1698         else if (flow_ctrl & FLOW_CTRL_RX)
1699                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1700         else
1701                 miireg = 0;
1702
1703         return miireg;
1704 }
1705
1706 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1707 {
1708         u16 miireg;
1709
1710         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1711                 miireg = ADVERTISE_1000XPAUSE;
1712         else if (flow_ctrl & FLOW_CTRL_TX)
1713                 miireg = ADVERTISE_1000XPSE_ASYM;
1714         else if (flow_ctrl & FLOW_CTRL_RX)
1715                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1716         else
1717                 miireg = 0;
1718
1719         return miireg;
1720 }
1721
1722 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1723 {
1724         u8 cap = 0;
1725
1726         if (lcladv & ADVERTISE_1000XPAUSE) {
1727                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1728                         if (rmtadv & LPA_1000XPAUSE)
1729                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1730                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1731                                 cap = FLOW_CTRL_RX;
1732                 } else {
1733                         if (rmtadv & LPA_1000XPAUSE)
1734                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1735                 }
1736         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1737                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1738                         cap = FLOW_CTRL_TX;
1739         }
1740
1741         return cap;
1742 }
1743
/* Resolve the active pause configuration and program the MAC's
 * RX/TX flow-control enables.
 *
 * When pause autonegotiation is in effect, the result is derived
 * from the local (lcladv) and remote (rmtadv) advertisement words:
 * the 1000BASE-X resolution is used for serdes PHYs, the standard
 * MII full-duplex resolution otherwise.  Without autoneg, the
 * user-configured link_config.flowctrl is applied directly.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, the autoneg setting lives on the phy_device. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the hardware when the mode word actually changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1782
/* phylib adjust_link callback (registered via phy_connect() in
 * tg3_phy_init).  Mirrors the PHY's negotiated state into the MAC:
 * port mode (MII/GMII), duplex, flow control, MI status attention
 * and TX slot time.  Emits a link report if anything user-visible
 * changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our advertised
			 * flowctrl and the partner's pause/asym_pause bits.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Half-duplex gigabit gets an extended slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when link state, speed, duplex or pause changed. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report outside tp->lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1866
/* Connect the MAC to its PHY through phylib and clamp the PHY's
 * supported/advertised feature set to what the MAC interface mode
 * allows.  Idempotent: returns 0 immediately if already connected.
 * Returns a negative errno if the PHY cannot be attached or the
 * interface mode is unsupported.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only devices fall back to basic features. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unknown interface mode: detach and bail out. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1914
/* (Re)start the phylib state machine for an attached PHY.  If the
 * device was flagged as being in low-power state, first restore the
 * speed/duplex/autoneg/advertising settings saved in
 * link_config.orig_* before kicking off autonegotiation.
 * No-op if no PHY is connected.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1936
1937 static void tg3_phy_stop(struct tg3 *tp)
1938 {
1939         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1940                 return;
1941
1942         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1943 }
1944
1945 static void tg3_phy_fini(struct tg3 *tp)
1946 {
1947         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1948                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1949                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1950         }
1951 }
1952
/* Set the external-loopback bit in the PHY's auxiliary control
 * register.  No-op for FET-style PHYs.  The 5401 PHY does not
 * tolerate read-modify-write on this register, so a fixed value is
 * written there instead.  Returns 0 on success or the auxctl
 * read/write error code.
 */
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
1982
/* Enable/disable auto power-down on FET-style PHYs by toggling the
 * APD bit in shadow register AUXSTAT2.  Shadow access is gated by
 * setting SHADOW_EN in the FET_TEST register; the original FET_TEST
 * value is restored on exit.  Read failures silently skip the write.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
2002
/* Enable/disable PHY auto power-down (APD).  Skipped on pre-5705
 * parts and on 5717-class devices with an MII serdes PHY.  FET PHYs
 * use their own shadow-register sequence; other PHYs are programmed
 * through the MISC_SHDW SCR5 and APD shadow registers.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* On 5784, the DLLAPD bit is withheld when enabling APD. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2037
/* Enable/disable automatic MDI crossover (auto-MDIX) in the PHY.
 * Skipped on pre-5705 parts and on serdes PHYs.  FET PHYs toggle
 * the MDIX bit in the MISCCTRL shadow register (gated via FET_TEST
 * SHADOW_EN); other PHYs use the auxctl MISC shadow's FORCE_AMDIX
 * bit.  Read failures silently skip the corresponding write.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Restore original FET_TEST value (clears SHADOW_EN). */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2078
/* Set the wire-speed enable bit in the PHY's auxctl MISC shadow
 * register, unless the PHY is flagged as not supporting ethernet
 * wirespeed.  A failed auxctl read silently skips the write.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
2092
/* Program PHY DSP coefficients from the OTP word cached in
 * tp->phy_otp, if present.  Each field is extracted with its
 * mask/shift pair and written to the corresponding DSP register,
 * with temporary SMDSP access enabled through auxctl.  If SMDSP
 * access cannot be obtained, nothing is written.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	/* AGC target, combined with its default bit. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2129
/* Re-evaluate Energy Efficient Ethernet state after a link change.
 * On an autonegotiated full-duplex 100/1000 link, program the LPI
 * exit timer for the active speed and, if the link partner resolved
 * EEE for that speed, set tp->setlpicnt (consumed elsewhere to
 * enable LPI).  Otherwise clear DSP TAP26 and disable LPI in the
 * CPMU.  No-op for PHYs without EEE capability.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Clause 45 read of the EEE resolution status. */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2172
/* Enable low power idle (LPI) in the CPMU.  On 5717/5719/57765 at
 * gigabit speed, the ALNOKO and RMRXSTO DSP TAP26 bits are set
 * first (via temporary SMDSP access) before LPI is turned on.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2191
/* Poll the DSP control register until its busy bit (0x1000) clears.
 * Failed reads are simply retried.  Gives up after 100 attempts and
 * returns -EBUSY; returns 0 on completion.  Note: after a break,
 * limit is still >= 0, so only full exhaustion (limit == -1) fails.
 */
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
2209
/* Write a fixed test pattern into each of the four PHY DSP channels
 * and read it back to verify the DSP is functioning.
 *
 * On any macro-done timeout *resetp is set to 1 so the caller
 * performs another PHY reset before retrying; on a readback
 * mismatch a recovery sequence (0x000b / 0x4001 / 0x4005) is
 * written and -EBUSY is returned WITHOUT requesting a reset.
 * Returns 0 when all channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start a read macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back low/high word pairs and compare, masking the
		 * bits the hardware does not preserve.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2275
/* Clear the test pattern from all four PHY DSP channels by writing
 * zeros through the macro interface.  Returns -EBUSY if the macro
 * does not complete for any channel, 0 otherwise.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
2295
2296 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2297 {
2298         u32 reg32, phy9_orig;
2299         int retries, do_phy_reset, err;
2300
2301         retries = 10;
2302         do_phy_reset = 1;
2303         do {
2304                 if (do_phy_reset) {
2305                         err = tg3_bmcr_reset(tp);
2306                         if (err)
2307                                 return err;
2308                         do_phy_reset = 0;
2309                 }
2310
2311                 /* Disable transmitter and interrupt.  */
2312                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2313                         continue;
2314
2315                 reg32 |= 0x3000;
2316                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2317
2318                 /* Set full-duplex, 1000 mbps.  */
2319                 tg3_writephy(tp, MII_BMCR,
2320                              BMCR_FULLDPLX | BMCR_SPEED1000);
2321
2322                 /* Set to master mode.  */
2323                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2324                         continue;
2325
2326                 tg3_writephy(tp, MII_CTRL1000,
2327                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2328
2329                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2330                 if (err)
2331                         return err;
2332
2333                 /* Block the PHY control access.  */
2334                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2335
2336                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2337                 if (!err)
2338                         break;
2339         } while (--retries);
2340
2341         err = tg3_phy_reset_chanpat(tp);
2342         if (err)
2343                 return err;
2344
2345         tg3_phydsp_write(tp, 0x8005, 0x0000);
2346
2347         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2348         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2349
2350         tg3_phy_toggle_auxctl_smdsp(tp, false);
2351
2352         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2353
2354         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2355                 reg32 &= ~0x3000;
2356                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2357         } else if (!err)
2358                 err = -EBUSY;
2359
2360         return err;
2361 }
2362
/* Reset the tigon3 PHY and re-apply every chip- and PHY-specific
 * workaround required afterwards: DSP fixups for known PHY bugs,
 * auto power-down, jumbo-frame bits, auto-MDIX and wirespeed.
 * (The 5703/5704/5705 use a dedicated recovery routine instead of a
 * plain BMCR reset.)  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: take the embedded PHY out of IDDQ before touching it. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Two BMSR reads: confirm the PHY responds on the MII bus. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* Non-AX 5784: temporarily drop GPHY_10MB_RXONLY around the
	 * reset and restore it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX / 5761-AX: clear the 12.5MHz gigabit MAC clock mode. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-PHY-erratum DSP fixups, selected by phy_flags. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2503
/* Per-function GPIO arbitration messages.  Each of the four PCI
 * functions owns a 4-bit field in a shared status word (see
 * tg3_set_function_status()); the *_ALL_* masks cover the same bit in
 * every function's field.
 */
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2519
/* Publish this function's TG3_GPIO_MSG_* bits in the status word shared
 * by all four PCI functions.  On 5717/5719 the shared word lives in the
 * APE GPIO message register; on other chips it is kept in the CPMU
 * driver-status register.  Each function owns the 4-bit field at
 * TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn.
 *
 * Returns the updated status word shifted down so that function 0's
 * field starts at bit 0 (ready to test against the *_ALL_* masks).
 */
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	/* Replace only this function's 4-bit field. */
	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
2542
/* Switch the board's power source from auxiliary power (Vaux) back to
 * main power (Vmain) by rewriting GRC_LOCAL_CTRL.  No-op for non-NIC
 * (e.g. LOM) configurations.
 *
 * On 5717/5719/5720 the power-switch GPIOs are shared between PCI
 * functions, so the APE GPIO lock is taken and driver presence is
 * advertised in the shared status word before touching the GPIOs.
 *
 * Returns 0 on success, -EIO if the APE GPIO lock cannot be acquired.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
2567
/* Leave the device powered from Vmain: with GPIO1 driven as an output,
 * pulse OUTPUT1 high, low, then high again with a settle delay between
 * each step.  Nothing to do for non-NIC configurations or for
 * 5700/5701, whose GPIOs are wired differently (see
 * tg3_pwrsrc_switch_to_vaux()).
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2591
/* Switch the board's power source to auxiliary power (Vaux) by stepping
 * the GRC local-control GPIOs through a chip-specific sequence, with a
 * settle delay after each write.  No-op unless this is a NIC
 * configuration.  Three sequences exist: 5700/5701, 5761(S) (which swap
 * GPIO 0 and 2), and everything else (honouring the no-GPIO2 strapping
 * on 5753-class parts).
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Single write: enable GPIO 0-2 as outputs, drive 0 and 1. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0 only after GPIO1/GPIO2 have settled. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2668
/* Vaux/Vmain arbitration for 5717-class chips, where the power-switch
 * GPIOs are shared between PCI functions.  Under the APE GPIO lock,
 * publish this function's VAUX request (set when ASF/APE management or
 * WoL needs aux power) in the shared status word.  Only the last driver
 * present actually flips the power source: if any other function still
 * advertises driver presence, leave the GPIOs alone.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	/* Another function's driver is still loaded; let it decide. */
	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2693
/* Decide whether the device (and, on two-port boards, its peer
 * function) needs auxiliary power, and switch the power source
 * accordingly.  @include_wol controls whether WoL counts as a reason to
 * stay on Vaux.  5717-class chips use the shared-GPIO protocol in
 * tg3_frob_aux_power_5717() instead.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* Peer driver is still up; it owns the GPIOs. */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2738
2739 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2740 {
2741         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2742                 return 1;
2743         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2744                 if (speed != SPEED_10)
2745                         return 1;
2746         } else if (speed == SPEED_10)
2747                 return 1;
2748
2749         return 0;
2750 }
2751
2752 static int tg3_setup_phy(struct tg3 *, int);
2753 static int tg3_halt_cpu(struct tg3 *, u32);
2754
2755 static bool tg3_phy_power_bug(struct tg3 *tp)
2756 {
2757         switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2758         case ASIC_REV_5700:
2759         case ASIC_REV_5704:
2760                 return true;
2761         case ASIC_REV_5780:
2762                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2763                         return true;
2764                 return false;
2765         case ASIC_REV_5717:
2766                 if (!tp->pci_fn)
2767                         return true;
2768                 return false;
2769         case ASIC_REV_5719:
2770         case ASIC_REV_5720:
2771                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2772                     !tp->pci_fn)
2773                         return true;
2774                 return false;
2775         }
2776
2777         return false;
2778 }
2779
/* Prepare the PHY for a low-power transition.
 *
 * Serdes devices: on 5704, force HW autoneg + soft reset and set bit 15
 * of MAC_SERDES_CFG, then return (no copper PHY to power down).
 * 5906: reset the PHY and put the internal EPHY in IDDQ mode.
 * FET PHYs: restart autoneg with nothing advertised, then set the
 * standby-power-down bit via shadow register AUXMODE4.
 * Other PHYs, when @do_low_power: force LEDs off and program the
 * low-power/isolate/1.1V aux-control bits.
 *
 * Finally, unless the chip has a PHY power-down erratum
 * (tg3_phy_power_bug()), drop the 1000Mb MAC clock to 12.5MHz on
 * 5784-AX/5761-AX and set BMCR_PDOWN.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Expose the shadow registers to reach AUXMODE4. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2849
/* tp->lock is held. */
/* Acquire the NVRAM software arbitration semaphore (SWARB request 1).
 * The lock nests: nvram_lock_cnt counts recursive acquisitions and only
 * the outermost one touches the hardware, polling up to 8000 * 20us for
 * the grant.  On timeout the request is withdrawn and -ENODEV returned.
 * Trivially succeeds on devices without NVRAM.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
2872
2873 /* tp->lock is held. */
2874 static void tg3_nvram_unlock(struct tg3 *tp)
2875 {
2876         if (tg3_flag(tp, NVRAM)) {
2877                 if (tp->nvram_lock_cnt > 0)
2878                         tp->nvram_lock_cnt--;
2879                 if (tp->nvram_lock_cnt == 0)
2880                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2881         }
2882 }
2883
2884 /* tp->lock is held. */
2885 static void tg3_enable_nvram_access(struct tg3 *tp)
2886 {
2887         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2888                 u32 nvaccess = tr32(NVRAM_ACCESS);
2889
2890                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2891         }
2892 }
2893
2894 /* tp->lock is held. */
2895 static void tg3_disable_nvram_access(struct tg3 *tp)
2896 {
2897         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2898                 u32 nvaccess = tr32(NVRAM_ACCESS);
2899
2900                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2901         }
2902 }
2903
/* Read one 32-bit word from the serial EEPROM through the GRC EEPROM
 * interface (the fallback for devices without an NVRAM interface).
 * @offset must be word-aligned and within EEPROM_ADDR_ADDR_MASK.
 * Polls up to 1000 * 1ms for completion.
 *
 * Returns 0 on success (with *@val filled in), -EINVAL for a bad
 * offset, -EBUSY if the read never completes.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Keep everything but the address/devid/read fields, then kick
	 * off a read at the requested address.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2943
/* Maximum number of 10us polls to wait for NVRAM_CMD_DONE (~100ms). */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue @nvram_cmd to the NVRAM interface and busy-wait for it to
 * complete.  Returns 0 on success, -EBUSY if NVRAM_CMD_DONE is not
 * observed within NVRAM_CMD_TIMEOUT polls.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
2964
2965 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2966 {
2967         if (tg3_flag(tp, NVRAM) &&
2968             tg3_flag(tp, NVRAM_BUFFERED) &&
2969             tg3_flag(tp, FLASH) &&
2970             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2971             (tp->nvram_jedecnum == JEDEC_ATMEL))
2972
2973                 addr = ((addr / tp->nvram_pagesize) <<
2974                         ATMEL_AT45DB0X1B_PAGE_POS) +
2975                        (addr % tp->nvram_pagesize);
2976
2977         return addr;
2978 }
2979
2980 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2981 {
2982         if (tg3_flag(tp, NVRAM) &&
2983             tg3_flag(tp, NVRAM_BUFFERED) &&
2984             tg3_flag(tp, FLASH) &&
2985             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2986             (tp->nvram_jedecnum == JEDEC_ATMEL))
2987
2988                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2989                         tp->nvram_pagesize) +
2990                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2991
2992         return addr;
2993 }
2994
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word at byte @offset from NVRAM into *@val.
 * Falls back to the EEPROM path on devices without an NVRAM interface.
 * Takes the NVRAM arbitration lock and enables access around the read.
 * Returns 0 on success, -EINVAL for an out-of-range offset, or the
 * error from locking / command execution.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3032
3033 /* Ensures NVRAM data is in bytestream format. */
3034 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3035 {
3036         u32 v;
3037         int res = tg3_nvram_read(tp, offset, &v);
3038         if (!res)
3039                 *val = cpu_to_be32(v);
3040         return res;
3041 }
3042
3043 #define RX_CPU_SCRATCH_BASE     0x30000
3044 #define RX_CPU_SCRATCH_SIZE     0x04000
3045 #define TX_CPU_SCRATCH_BASE     0x34000
3046 #define TX_CPU_SCRATCH_SIZE     0x04000
3047
3048 /* tp->lock is held. */
3049 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3050 {
3051         int i;
3052
3053         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3054
3055         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3056                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3057
3058                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3059                 return 0;
3060         }
3061         if (offset == RX_CPU_BASE) {
3062                 for (i = 0; i < 10000; i++) {
3063                         tw32(offset + CPU_STATE, 0xffffffff);
3064                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3065                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3066                                 break;
3067                 }
3068
3069                 tw32(offset + CPU_STATE, 0xffffffff);
3070                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3071                 udelay(10);
3072         } else {
3073                 for (i = 0; i < 10000; i++) {
3074                         tw32(offset + CPU_STATE, 0xffffffff);
3075                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3076                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3077                                 break;
3078                 }
3079         }
3080
3081         if (i >= 10000) {
3082                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3083                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3084                 return -ENODEV;
3085         }
3086
3087         /* Clear firmware's nvram arbitration. */
3088         if (tg3_flag(tp, NVRAM))
3089                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3090         return 0;
3091 }
3092
/* Descriptor for a firmware image to be loaded into an on-chip CPU. */
struct fw_info {
	unsigned int fw_base;	/* start address taken from the blob header */
	unsigned int fw_len;	/* total image length in bytes */
	const __be32 *fw_data;	/* firmware words, big-endian */
};
3098
/* tp->lock is held. */
/* Load the firmware image described by @info into @cpu_base's scratch
 * memory: halt the CPU (under the NVRAM lock, since bootcode may still
 * be running), zero @cpu_scratch_size bytes at @cpu_scratch_base, then
 * copy the image in word by word.  The CPU is left halted; the caller
 * sets the PC and releases it.  Returns 0 on success or the error from
 * tg3_halt_cpu().
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ parts use direct memory writes; older parts must go
	 * through the indirect register interface.
	 */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3144
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware (tp->fw) into both the RX and
 * TX CPU scratch areas, then start only the RX CPU: set its PC to the
 * image base and retry up to 5 times (re-halting in between) until the
 * PC reads back correctly, before finally releasing the CPU from halt.
 * Returns 0 on success, a tg3_load_firmware_cpu() error, or -ENODEV if
 * the PC never sticks.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3199
/* tp->lock is held. */
/* Load the TSO firmware (tp->fw) and start the CPU that will run it:
 * the RX CPU (using SRAM mbuf-pool space as scratch) on 5705, the TX
 * CPU otherwise.  No-op on devices with hardware TSO.  Like
 * tg3_load_5701_a0_firmware_fix(), the PC is set and verified with up
 * to 5 retries before the CPU is released from halt.
 * Returns 0 on success, a tg3_load_firmware_cpu() error, or -ENODEV if
 * the PC never sticks.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3263
3264
/* tp->lock is held. */
/* Program the device's MAC address registers from dev->dev_addr.
 * All four MAC_ADDR_* high/low register pairs receive the same address
 * (pair 1 can be left untouched via @skip_mac_1); 5703/5704 also fill
 * the twelve extended address slots.  Finally the TX backoff seed is
 * derived from the byte sum of the address, as required for the
 * backoff algorithm.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	/* Bytes 0-1 go in the high register, bytes 2-5 in the low. */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
3301
/* Restore the cached TG3PCI_MISC_HOST_CTRL value into PCI config space.
 * Callers invoke this at the top of the power-up/power-down paths;
 * presumably the setting can be lost across a power-state transition —
 * NOTE(review): confirm against the chip manual.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3311
3312 static int tg3_power_up(struct tg3 *tp)
3313 {
3314         int err;
3315
3316         tg3_enable_register_access(tp);
3317
3318         err = pci_set_power_state(tp->pdev, PCI_D0);
3319         if (!err) {
3320                 /* Switch out of Vaux if it is a NIC */
3321                 tg3_pwrsrc_switch_to_vmain(tp);
3322         } else {
3323                 netdev_err(tp->dev, "Transition to D0 failed\n");
3324         }
3325
3326         return err;
3327 }
3328
/* Quiesce the chip and program its wake-on-LAN / low-power state ahead
 * of a transition to D3 (see tg3_power_down()).  The sequence: restore
 * CLKREQ, mask PCI interrupts, drop the PHY link to a low-power speed,
 * signal WoL state to firmware, configure the MAC for magic-packet
 * wake, gate clocks, power down the PHY when nothing needs to wake us,
 * and finally post the shutdown signature.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask chip interrupts from the PCI bus for the duration. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib-managed PHY: save the current link settings
		 * (restored on resume) and renegotiate down to a
		 * low-power advertisement.
		 */
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			/* 10Half is the minimum; add faster modes only if
			 * firmware (ASF) or WoL needs the link up.
			 */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Certain Broadcom PHY families need the extra
			 * low-power programming done below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		/* Driver-managed PHY: save link settings and force
		 * 10/half on copper devices.
		 */
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Poll (up to ~200ms) for the firmware mailbox magic —
		 * presumably waiting for bootcode readiness; TODO confirm.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell firmware we are shutting down with WoL armed. */
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		/* Keep the MAC/PHY path alive so a magic packet can be
		 * received while suspended.
		 */
		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				/* 5700 link polarity depends on the WoL speed. */
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating: per-ASIC selection of which core clocks can be
	 * stopped or slowed while in low power.
	 */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step write with settle time between each stage. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down when neither WoL nor ASF
	 * firmware needs it alive.
	 */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only
			 * release the lock if we actually acquired it.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3574
/* Full power-down: run the shutdown preparation sequence, arm PCI wake
 * when WoL is enabled, then drop the device into D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3582
3583 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3584 {
3585         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3586         case MII_TG3_AUX_STAT_10HALF:
3587                 *speed = SPEED_10;
3588                 *duplex = DUPLEX_HALF;
3589                 break;
3590
3591         case MII_TG3_AUX_STAT_10FULL:
3592                 *speed = SPEED_10;
3593                 *duplex = DUPLEX_FULL;
3594                 break;
3595
3596         case MII_TG3_AUX_STAT_100HALF:
3597                 *speed = SPEED_100;
3598                 *duplex = DUPLEX_HALF;
3599                 break;
3600
3601         case MII_TG3_AUX_STAT_100FULL:
3602                 *speed = SPEED_100;
3603                 *duplex = DUPLEX_FULL;
3604                 break;
3605
3606         case MII_TG3_AUX_STAT_1000HALF:
3607                 *speed = SPEED_1000;
3608                 *duplex = DUPLEX_HALF;
3609                 break;
3610
3611         case MII_TG3_AUX_STAT_1000FULL:
3612                 *speed = SPEED_1000;
3613                 *duplex = DUPLEX_FULL;
3614                 break;
3615
3616         default:
3617                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3618                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3619                                  SPEED_10;
3620                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3621                                   DUPLEX_HALF;
3622                         break;
3623                 }
3624                 *speed = SPEED_INVALID;
3625                 *duplex = DUPLEX_INVALID;
3626                 break;
3627         }
3628 }
3629
3630 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3631 {
3632         int err = 0;
3633         u32 val, new_adv;
3634
3635         new_adv = ADVERTISE_CSMA;
3636         if (advertise & ADVERTISED_10baseT_Half)
3637                 new_adv |= ADVERTISE_10HALF;
3638         if (advertise & ADVERTISED_10baseT_Full)
3639                 new_adv |= ADVERTISE_10FULL;
3640         if (advertise & ADVERTISED_100baseT_Half)
3641                 new_adv |= ADVERTISE_100HALF;
3642         if (advertise & ADVERTISED_100baseT_Full)
3643                 new_adv |= ADVERTISE_100FULL;
3644
3645         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3646
3647         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3648         if (err)
3649                 goto done;
3650
3651         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3652                 goto done;
3653
3654         new_adv = 0;
3655         if (advertise & ADVERTISED_1000baseT_Half)
3656                 new_adv |= ADVERTISE_1000HALF;
3657         if (advertise & ADVERTISED_1000baseT_Full)
3658                 new_adv |= ADVERTISE_1000FULL;
3659
3660         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3661             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3662                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3663
3664         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3665         if (err)
3666                 goto done;
3667
3668         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3669                 goto done;
3670
3671         tw32(TG3_CPMU_EEE_MODE,
3672              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3673
3674         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
3675         if (!err) {
3676                 u32 err2;
3677
3678                 val = 0;
3679                 /* Advertise 100-BaseTX EEE ability */
3680                 if (advertise & ADVERTISED_100baseT_Full)
3681                         val |= MDIO_AN_EEE_ADV_100TX;
3682                 /* Advertise 1000-BaseT EEE ability */
3683                 if (advertise & ADVERTISED_1000baseT_Full)
3684                         val |= MDIO_AN_EEE_ADV_1000T;
3685                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3686                 if (err)
3687                         val = 0;
3688
3689                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3690                 case ASIC_REV_5717:
3691                 case ASIC_REV_57765:
3692                 case ASIC_REV_5719:
3693                         /* If we advertised any eee advertisements above... */
3694                         if (val)
3695                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3696                                       MII_TG3_DSP_TAP26_RMRXSTO |
3697                                       MII_TG3_DSP_TAP26_OPCSINPT;
3698                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3699                         /* Fall through */
3700                 case ASIC_REV_5720:
3701                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3702                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3703                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3704                 }
3705
3706                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
3707                 if (!err)
3708                         err = err2;
3709         }
3710
3711 done:
3712         return err;
3713 }
3714
/* Start copper link bring-up: choose what to advertise (low-power,
 * full autoneg, or a single forced mode), program the PHY, and either
 * force BMCR to the requested speed/duplex or restart autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Low-power state: advertise only the slow modes, plus
		 * 100Mb when WoL requires it.
		 */
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise the configured set, minus
		 * gigabit on 10/100-only PHYs.
		 */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback and wait (up to ~15ms)
			 * for link-down before writing the new mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is latched; read twice for the
				 * current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3808
3809 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3810 {
3811         int err;
3812
3813         /* Turn off tap power management. */
3814         /* Set Extended packet length bit */
3815         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3816
3817         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3818         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3819         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3820         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3821         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3822
3823         udelay(40);
3824
3825         return err;
3826 }
3827
3828 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3829 {
3830         u32 adv_reg, all_mask = 0;
3831
3832         if (mask & ADVERTISED_10baseT_Half)
3833                 all_mask |= ADVERTISE_10HALF;
3834         if (mask & ADVERTISED_10baseT_Full)
3835                 all_mask |= ADVERTISE_10FULL;
3836         if (mask & ADVERTISED_100baseT_Half)
3837                 all_mask |= ADVERTISE_100HALF;
3838         if (mask & ADVERTISED_100baseT_Full)
3839                 all_mask |= ADVERTISE_100FULL;
3840
3841         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3842                 return 0;
3843
3844         if ((adv_reg & ADVERTISE_ALL) != all_mask)
3845                 return 0;
3846
3847         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3848                 u32 tg3_ctrl;
3849
3850                 all_mask = 0;
3851                 if (mask & ADVERTISED_1000baseT_Half)
3852                         all_mask |= ADVERTISE_1000HALF;
3853                 if (mask & ADVERTISED_1000baseT_Full)
3854                         all_mask |= ADVERTISE_1000FULL;
3855
3856                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3857                         return 0;
3858
3859                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3860                 if (tg3_ctrl != all_mask)
3861                         return 0;
3862         }
3863
3864         return 1;
3865 }
3866
3867 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3868 {
3869         u32 curadv, reqadv;
3870
3871         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3872                 return 1;
3873
3874         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3875         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3876
3877         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3878                 if (curadv != reqadv)
3879                         return 0;
3880
3881                 if (tg3_flag(tp, PAUSE_AUTONEG))
3882                         tg3_readphy(tp, MII_LPA, rmtadv);
3883         } else {
3884                 /* Reprogram the advertisement register, even if it
3885                  * does not affect the current link.  If the link
3886                  * gets renegotiated in the future, we can save an
3887                  * additional renegotiation cycle by advertising
3888                  * it correctly in the first place.
3889                  */
3890                 if (curadv != reqadv) {
3891                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3892                                      ADVERTISE_PAUSE_ASYM);
3893                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3894                 }
3895         }
3896
3897         return 1;
3898 }
3899
3900 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3901 {
3902         int current_link_up;
3903         u32 bmsr, val;
3904         u32 lcl_adv, rmt_adv;
3905         u16 current_speed;
3906         u8 current_duplex;
3907         int i, err;
3908
3909         tw32(MAC_EVENT, 0);
3910
3911         tw32_f(MAC_STATUS,
3912              (MAC_STATUS_SYNC_CHANGED |
3913               MAC_STATUS_CFG_CHANGED |
3914               MAC_STATUS_MI_COMPLETION |
3915               MAC_STATUS_LNKSTATE_CHANGED));
3916         udelay(40);
3917
3918         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3919                 tw32_f(MAC_MI_MODE,
3920                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3921                 udelay(80);
3922         }
3923
3924         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3925
3926         /* Some third-party PHYs need to be reset on link going
3927          * down.
3928          */
3929         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3930              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3931              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3932             netif_carrier_ok(tp->dev)) {
3933                 tg3_readphy(tp, MII_BMSR, &bmsr);
3934                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3935                     !(bmsr & BMSR_LSTATUS))
3936                         force_reset = 1;
3937         }
3938         if (force_reset)
3939                 tg3_phy_reset(tp);
3940
3941         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3942                 tg3_readphy(tp, MII_BMSR, &bmsr);
3943                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3944                     !tg3_flag(tp, INIT_COMPLETE))
3945                         bmsr = 0;
3946
3947                 if (!(bmsr & BMSR_LSTATUS)) {
3948                         err = tg3_init_5401phy_dsp(tp);
3949                         if (err)
3950                                 return err;
3951
3952                         tg3_readphy(tp, MII_BMSR, &bmsr);
3953                         for (i = 0; i < 1000; i++) {
3954                                 udelay(10);
3955                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3956                                     (bmsr & BMSR_LSTATUS)) {
3957                                         udelay(40);
3958                                         break;
3959                                 }
3960                         }
3961
3962                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3963                             TG3_PHY_REV_BCM5401_B0 &&
3964                             !(bmsr & BMSR_LSTATUS) &&
3965                             tp->link_config.active_speed == SPEED_1000) {
3966                                 err = tg3_phy_reset(tp);
3967                                 if (!err)
3968                                         err = tg3_init_5401phy_dsp(tp);
3969                                 if (err)
3970                                         return err;
3971                         }
3972                 }
3973         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3974                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3975                 /* 5701 {A0,B0} CRC bug workaround */
3976                 tg3_writephy(tp, 0x15, 0x0a75);
3977                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3978                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3979                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3980         }
3981
3982         /* Clear pending interrupts... */
3983         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3984         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3985
3986         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3987                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3988         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3989                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3990
3991         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3992             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3993                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3994                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3995                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3996                 else
3997                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3998         }
3999
4000         current_link_up = 0;
4001         current_speed = SPEED_INVALID;
4002         current_duplex = DUPLEX_INVALID;
4003
4004         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4005                 err = tg3_phy_auxctl_read(tp,
4006                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4007                                           &val);
4008                 if (!err && !(val & (1 << 10))) {
4009                         tg3_phy_auxctl_write(tp,
4010                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4011                                              val | (1 << 10));
4012                         goto relink;
4013                 }
4014         }
4015
4016         bmsr = 0;
4017         for (i = 0; i < 100; i++) {
4018                 tg3_readphy(tp, MII_BMSR, &bmsr);
4019                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4020                     (bmsr & BMSR_LSTATUS))
4021                         break;
4022                 udelay(40);
4023         }
4024
4025         if (bmsr & BMSR_LSTATUS) {
4026                 u32 aux_stat, bmcr;
4027
4028                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4029                 for (i = 0; i < 2000; i++) {
4030                         udelay(10);
4031                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4032                             aux_stat)
4033                                 break;
4034                 }
4035
4036                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4037                                              &current_speed,
4038                                              &current_duplex);
4039
4040                 bmcr = 0;
4041                 for (i = 0; i < 200; i++) {
4042                         tg3_readphy(tp, MII_BMCR, &bmcr);
4043                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4044                                 continue;
4045                         if (bmcr && bmcr != 0x7fff)
4046                                 break;
4047                         udelay(10);
4048                 }
4049
4050                 lcl_adv = 0;
4051                 rmt_adv = 0;
4052
4053                 tp->link_config.active_speed = current_speed;
4054                 tp->link_config.active_duplex = current_duplex;
4055
4056                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4057                         if ((bmcr & BMCR_ANENABLE) &&
4058                             tg3_copper_is_advertising_all(tp,
4059                                                 tp->link_config.advertising)) {
4060                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4061                                                                   &rmt_adv))
4062                                         current_link_up = 1;
4063                         }
4064                 } else {
4065                         if (!(bmcr & BMCR_ANENABLE) &&
4066                             tp->link_config.speed == current_speed &&
4067                             tp->link_config.duplex == current_duplex &&
4068                             tp->link_config.flowctrl ==
4069                             tp->link_config.active_flowctrl) {
4070                                 current_link_up = 1;
4071                         }
4072                 }
4073
4074                 if (current_link_up == 1 &&
4075                     tp->link_config.active_duplex == DUPLEX_FULL)
4076                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4077         }
4078
4079 relink:
4080         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4081                 tg3_phy_copper_begin(tp);
4082
4083                 tg3_readphy(tp, MII_BMSR, &bmsr);
4084                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4085                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4086                         current_link_up = 1;
4087         }
4088
4089         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4090         if (current_link_up == 1) {
4091                 if (tp->link_config.active_speed == SPEED_100 ||
4092                     tp->link_config.active_speed == SPEED_10)
4093                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4094                 else
4095                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4096         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4097                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4098         else
4099                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4100
4101         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4102         if (tp->link_config.active_duplex == DUPLEX_HALF)
4103                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4104
4105         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4106                 if (current_link_up == 1 &&
4107                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4108                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4109                 else
4110                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4111         }
4112
4113         /* ??? Without this setting Netgear GA302T PHY does not
4114          * ??? send/receive packets...
4115          */
4116         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4117             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4118                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4119                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4120                 udelay(80);
4121         }
4122
4123         tw32_f(MAC_MODE, tp->mac_mode);
4124         udelay(40);
4125
4126         tg3_phy_eee_adjust(tp, current_link_up);
4127
4128         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4129                 /* Polled via timer. */
4130                 tw32_f(MAC_EVENT, 0);
4131         } else {
4132                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4133         }
4134         udelay(40);
4135
4136         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4137             current_link_up == 1 &&
4138             tp->link_config.active_speed == SPEED_1000 &&
4139             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4140                 udelay(120);
4141                 tw32_f(MAC_STATUS,
4142                      (MAC_STATUS_SYNC_CHANGED |
4143                       MAC_STATUS_CFG_CHANGED));
4144                 udelay(40);
4145                 tg3_write_mem(tp,
4146                               NIC_SRAM_FIRMWARE_MBOX,
4147                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4148         }
4149
4150         /* Prevent send BD corruption. */
4151         if (tg3_flag(tp, CLKREQ_BUG)) {
4152                 u16 oldlnkctl, newlnkctl;
4153
4154                 pci_read_config_word(tp->pdev,
4155                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4156                                      &oldlnkctl);
4157                 if (tp->link_config.active_speed == SPEED_100 ||
4158                     tp->link_config.active_speed == SPEED_10)
4159                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4160                 else
4161                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4162                 if (newlnkctl != oldlnkctl)
4163                         pci_write_config_word(tp->pdev,
4164                                               pci_pcie_cap(tp->pdev) +
4165                                               PCI_EXP_LNKCTL, newlnkctl);
4166         }
4167
4168         if (current_link_up != netif_carrier_ok(tp->dev)) {
4169                 if (current_link_up)
4170                         netif_carrier_on(tp->dev);
4171                 else
4172                         netif_carrier_off(tp->dev);
4173                 tg3_link_report(tp);
4174         }
4175
4176         return 0;
4177 }
4178
/* Software state for the 1000BASE-X (IEEE 802.3 clause 37)
 * autonegotiation state machine used on fiber devices without
 * hardware autoneg support.  Stepped once per tick by
 * tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;		/* current arbitration state (ANEG_STATE_*) */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* Control/status flags modeled on the clause 37 MR_* management
	 * variables: local enables plus link partner abilities decoded
	 * from the received config word.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters (advanced once per smachine call, not jiffies). */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last RX config word seen */
	int ability_match_count;	/* consecutive repeats of that word */

	char ability_match, idle_match, ack_match;

	/* Raw transmitted/received config words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks to let the link settle in the RESTART/COMPLETE_ACK states. */
#define ANEG_STATE_SETTLE_TIME  10000
4242
/* Single step of the software IEEE 802.3 clause 37 1000BASE-X
 * autonegotiation arbitration state machine.  Called repeatedly (once
 * per tick) by fiber_autoneg() with per-negotiation state in @ap.
 *
 * Returns:
 *   ANEG_OK         - keep stepping
 *   ANEG_TIMER_ENAB - keep stepping; a settle timer is running
 *   ANEG_DONE       - negotiation finished (results in ap->flags)
 *   ANEG_FAILED     - invalid config word or next-page failure
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and update the ability/ack/idle
	 * trackers.  ability_match is only set once the same config word
	 * has been observed more than once in a row.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words being received: idle line. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word and let the link
		 * settle before starting ability detection.
		 */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Keep advertising, now with the acknowledge bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Partner's word changed: renegotiate. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the link partner's advertised abilities into
		 * the MR_LP_ADV_* flags.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * fail unless neither side needs it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
4494
4495 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4496 {
4497         int res = 0;
4498         struct tg3_fiber_aneginfo aninfo;
4499         int status = ANEG_FAILED;
4500         unsigned int tick;
4501         u32 tmp;
4502
4503         tw32_f(MAC_TX_AUTO_NEG, 0);
4504
4505         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4506         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4507         udelay(40);
4508
4509         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4510         udelay(40);
4511
4512         memset(&aninfo, 0, sizeof(aninfo));
4513         aninfo.flags |= MR_AN_ENABLE;
4514         aninfo.state = ANEG_STATE_UNKNOWN;
4515         aninfo.cur_time = 0;
4516         tick = 0;
4517         while (++tick < 195000) {
4518                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4519                 if (status == ANEG_DONE || status == ANEG_FAILED)
4520                         break;
4521
4522                 udelay(1);
4523         }
4524
4525         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4526         tw32_f(MAC_MODE, tp->mac_mode);
4527         udelay(40);
4528
4529         *txflags = aninfo.txconfig;
4530         *rxflags = aninfo.flags;
4531
4532         if (status == ANEG_DONE &&
4533             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4534                              MR_LP_ADV_FULL_DUPLEX)))
4535                 res = 1;
4536
4537         return res;
4538 }
4539
/* One-time initialization sequence for the BCM8002 SerDes PHY.
 * The register numbers and values below are undocumented vendor
 * magic; do not reorder.  The sequence is skipped when the device is
 * already initialized and PCS sync is absent (nothing to recover).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4589
/* Link setup for fiber devices using the hardware (SG_DIG)
 * autonegotiation block.
 *
 * @mac_status: snapshot of MAC_STATUS taken by the caller.
 *
 * Returns 1 if the link is up after this pass, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* SERDES_CFG pokes are needed on everything except 5704 A0/A1. */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: turn hardware autoneg off if it is on. */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* While in parallel-detect mode with PCS sync and no
		 * incoming config words, keep the link up and count
		 * down before forcing an autoneg restart.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset, then program the wanted control word. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate SG_DIG pause bits into 1000BASE-X
			 * advertisement values for flow control setup.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: drop back to
				 * parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync or signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
4731
/* Link setup for fiber devices without hardware autoneg support:
 * run the software clause 37 state machine (fiber_autoneg()) when
 * autoneg is enabled, otherwise force a 1000FD link.
 *
 * @mac_status: snapshot of MAC_STATUS taken by the caller.
 *
 * Returns 1 if the link is up after this pass, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Convert the negotiated config-word pause bits
			 * into 1000BASE-X advertisement values.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-changed events until they stop. */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but PCS is synced and the partner is
		 * not sending config words: treat the link as up.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4793
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        /* Snapshot the current link parameters so we can detect at the end
         * whether anything changed and a link report is warranted.
         */
        orig_pause_cfg = tp->link_config.active_flowctrl;
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        /* Fast path: if we are not using hardware autoneg, the carrier is
         * already up, and init has completed, a clean MAC status (PCS
         * synced + signal detect, no pending config changes) means the
         * link is stable -- just ack the change bits and return.
         */
        if (!tg3_flag(tp, HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
            tg3_flag(tp, INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Put the MAC into TBI (ten-bit interface) port mode for the
         * fiber serdes link.
         */
        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == TG3_PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        mac_status = tr32(MAC_STATUS);

        /* Negotiate either via the on-chip autoneg state machine or by
         * driving the config-word exchange from software.
         */
        if (tg3_flag(tp, HW_AUTONEG))
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        /* Clear the link-changed bit in the status block while keeping
         * the rest of its state intact.
         */
        tp->napi[0].hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

        /* Ack sync/config change bits until the MAC status settles
         * (bounded at 100 iterations).
         */
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
                /* PCS lost sync: if autoneg is enabled and the serdes
                 * timer has expired, pulse SEND_CONFIGS to try to
                 * restart the config-word exchange with the partner.
                 */
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        /* Fiber links only run 1000/full; reflect that (or invalid state
         * on link-down) and drive the link LED accordingly.
         */
        if (current_link_up == 1) {
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_INVALID;
                tp->link_config.active_duplex = DUPLEX_INVALID;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        /* Propagate carrier state to the net stack; report the link if
         * carrier toggled or any of speed/duplex/flow-control changed.
         */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        } else {
                u32 now_pause_cfg = tp->link_config.active_flowctrl;
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}
4901
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up, err = 0;
        u32 bmsr, bmcr;
        u16 current_speed;
        u8 current_duplex;
        u32 local_adv, remote_adv;

        /* This path handles serdes devices managed through MII registers
         * (as opposed to the TBI path in tg3_setup_fiber_phy()).
         */
        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32(MAC_EVENT, 0);

        /* Ack any pending link-state change indications before probing. */
        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        if (force_reset)
                tg3_phy_reset(tp);

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        /* BMSR is latched; read twice to get the current link state. */
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        /* On 5714, the MAC TX status is the authoritative link
         * indication; override the PHY-reported BMSR link bit with it.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        err |= tg3_readphy(tp, MII_BMCR, &bmcr);

        if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
            (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
                /* do nothing, just check for link up at the end */
        } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 adv, new_adv;

                /* Rebuild the 1000BASE-X advertisement word from the
                 * configured flow-control and speed/duplex settings.
                 */
                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
                                  ADVERTISE_1000XPAUSE |
                                  ADVERTISE_1000XPSE_ASYM |
                                  ADVERTISE_SLCT);

                new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

                if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                        new_adv |= ADVERTISE_1000XHALF;
                if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                        new_adv |= ADVERTISE_1000XFULL;

                /* If the advertisement changed or autoneg was off,
                 * (re)start autonegotiation and return early -- link
                 * resolution happens on a later poll.
                 */
                if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);
                        bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
                        tg3_writephy(tp, MII_BMCR, bmcr);

                        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
                        tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

                        return err;
                }
        } else {
                u32 new_bmcr;

                /* Forced-mode path: program BMCR directly. */
                bmcr &= ~BMCR_SPEED1000;
                new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

                if (tp->link_config.duplex == DUPLEX_FULL)
                        new_bmcr |= BMCR_FULLDPLX;

                if (new_bmcr != bmcr) {
                        /* BMCR_SPEED1000 is a reserved bit that needs
                         * to be set on write.
                         */
                        new_bmcr |= BMCR_SPEED1000;

                        /* Force a linkdown */
                        if (netif_carrier_ok(tp->dev)) {
                                u32 adv;

                                /* Strip the speed bits from the
                                 * advertisement and restart autoneg so
                                 * the link drops before we force a mode.
                                 */
                                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                                adv &= ~(ADVERTISE_1000XFULL |
                                         ADVERTISE_1000XHALF |
                                         ADVERTISE_SLCT);
                                tg3_writephy(tp, MII_ADVERTISE, adv);
                                tg3_writephy(tp, MII_BMCR, bmcr |
                                                           BMCR_ANRESTART |
                                                           BMCR_ANENABLE);
                                udelay(10);
                                netif_carrier_off(tp->dev);
                        }
                        tg3_writephy(tp, MII_BMCR, new_bmcr);
                        bmcr = new_bmcr;
                        /* Latched BMSR: read twice, then apply the same
                         * 5714 link-bit override as above.
                         */
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                        bmsr |= BMSR_LSTATUS;
                                else
                                        bmsr &= ~BMSR_LSTATUS;
                        }
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                }
        }

        /* Resolve the negotiated (or forced) link parameters. */
        if (bmsr & BMSR_LSTATUS) {
                current_speed = SPEED_1000;
                current_link_up = 1;
                if (bmcr & BMCR_FULLDPLX)
                        current_duplex = DUPLEX_FULL;
                else
                        current_duplex = DUPLEX_HALF;

                local_adv = 0;
                remote_adv = 0;

                if (bmcr & BMCR_ANENABLE) {
                        u32 common;

                        /* Duplex comes from the intersection of our
                         * advertisement and the link partner's.
                         */
                        err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
                        err |= tg3_readphy(tp, MII_LPA, &remote_adv);
                        common = local_adv & remote_adv;
                        if (common & (ADVERTISE_1000XHALF |
                                      ADVERTISE_1000XFULL)) {
                                if (common & ADVERTISE_1000XFULL)
                                        current_duplex = DUPLEX_FULL;
                                else
                                        current_duplex = DUPLEX_HALF;
                        } else if (!tg3_flag(tp, 5780_CLASS)) {
                                /* Link is up via parallel detect */
                        } else {
                                /* No common ability on a 5780-class
                                 * device: treat the link as down.
                                 */
                                current_link_up = 0;
                        }
                }
        }

        if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
                tg3_setup_flow_control(tp, local_adv, remote_adv);

        /* Mirror the resolved duplex into the MAC mode register. */
        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

        tp->link_config.active_speed = current_speed;
        tp->link_config.active_duplex = current_duplex;

        /* Update carrier; parallel-detect state is void on link loss. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else {
                        netif_carrier_off(tp->dev);
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                }
                tg3_link_report(tp);
        }
        return err;
}
5073
/* Poll for parallel-detect link establishment on MII serdes devices.
 * If autoneg fails but we see signal without config code words, force
 * a 1000/full link; conversely, if config words reappear on a
 * parallel-detected link, hand control back to autonegotiation.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
        if (tp->serdes_counter) {
                /* Give autoneg time to complete. */
                tp->serdes_counter--;
                return;
        }

        if (!netif_carrier_ok(tp->dev) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 bmcr;

                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Select shadow register 0x1f */
                        tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
                        tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

                        /* Select expansion interrupt status register */
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                                         MII_TG3_DSP_EXP1_INT_STAT);
                        /* Read twice; the register appears to be latched
                         * -- TODO confirm against PHY documentation.
                         */
                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

                        if ((phy1 & 0x10) && !(phy2 & 0x20)) {
                                /* We have signal detect and not receiving
                                 * config code words, link is up by parallel
                                 * detection.
                                 */

                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                tg3_writephy(tp, MII_BMCR, bmcr);
                                tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
                        }
                }
        } else if (netif_carrier_ok(tp->dev) &&
                   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
                   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Select expansion interrupt status register */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                                 MII_TG3_DSP_EXP1_INT_STAT);
                tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        /* Config code words received, turn on autoneg. */
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

                }
        }
}
5133
/* Top-level link setup: dispatch to the PHY-type-specific routine, then
 * apply chip-specific MAC/clock fixups that depend on the resulting
 * link state.  Returns the error code from the PHY-specific routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
        u32 val;
        int err;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                err = tg3_setup_fiber_phy(tp, force_reset);
        else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                err = tg3_setup_fiber_mii_phy(tp, force_reset);
        else
                err = tg3_setup_copper_phy(tp, force_reset);

        /* 5784_AX: reprogram the GRC clock prescaler to match the
         * current MAC clock speed.
         */
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
                u32 scale;

                val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
                if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
                        scale = 65;
                else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
                        scale = 6;
                else
                        scale = 12;

                val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
                val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
                tw32(GRC_MISC_CFG, val);
        }

        /* Rebuild MAC_TX_LENGTHS: fixed IPG values, preserving the 5720
         * jumbo-frame and countdown fields, with a larger slot time for
         * gigabit half-duplex links.
         */
        val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
              (6 << TX_LENGTHS_IPG_SHIFT);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                val |= tr32(MAC_TX_LENGTHS) &
                       (TX_LENGTHS_JMB_FRM_LEN_MSK |
                        TX_LENGTHS_CNT_DWN_VAL_MSK);

        if (tp->link_config.active_speed == SPEED_1000 &&
            tp->link_config.active_duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS, val |
                     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
        else
                tw32(MAC_TX_LENGTHS, val |
                     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

        /* Pre-5705 chips: only coalesce statistics while the link is up. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                if (netif_carrier_ok(tp->dev)) {
                        tw32(HOSTCC_STAT_COAL_TICKS,
                             tp->coal.stats_block_coalesce_usecs);
                } else {
                        tw32(HOSTCC_STAT_COAL_TICKS, 0);
                }
        }

        /* ASPM workaround: use the tuned L1 entry threshold while the
         * link is down, the maximum threshold while it is up.
         */
        if (tg3_flag(tp, ASPM_WORKAROUND)) {
                val = tr32(PCIE_PWR_MGMT_THRESH);
                if (!netif_carrier_ok(tp->dev))
                        val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
                              tp->pwrmgmt_thresh;
                else
                        val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
                tw32(PCIE_PWR_MGMT_THRESH, val);
        }

        return err;
}
5198
5199 static inline int tg3_irq_sync(struct tg3 *tp)
5200 {
5201         return tp->irq_sync;
5202 }
5203
5204 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5205 {
5206         int i;
5207
5208         dst = (u32 *)((u8 *)dst + off);
5209         for (i = 0; i < len; i += sizeof(u32))
5210                 *dst++ = tr32(off + i);
5211 }
5212
/* Fill 'regs' with a dump of the legacy (non-PCIe direct-map) register
 * blocks.  Each call copies one named register range; ranges that only
 * exist on certain configurations are guarded by feature flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
        tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
        tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
        tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
        tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
        tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
        tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
        tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
        tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
        tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
        tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
        tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
        tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
        tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
        tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
        tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

        /* Per-vector coalescing registers only exist with MSI-X. */
        if (tg3_flag(tp, SUPPORT_MSIX))
                tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

        tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
        tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
        tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

        /* The TX CPU block is absent on 5705-and-later chips. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
                tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
                tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
        }

        tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
        tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
        tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
        tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

        /* NVRAM interface registers only when NVRAM is present. */
        if (tg3_flag(tp, NVRAM))
                tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5262
/* Dump device registers and per-queue status/NAPI state to the kernel
 * log for debugging.  Uses GFP_ATOMIC since it may be called from a
 * context that cannot sleep.
 */
static void tg3_dump_state(struct tg3 *tp)
{
        int i;
        u32 *regs;

        regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
        if (!regs) {
                netdev_err(tp->dev, "Failed allocating register dump buffer\n");
                return;
        }

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Read up to but not including private PCI registers */
                for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
                        regs[i / sizeof(u32)] = tr32(i);
        } else
                tg3_dump_legacy_regs(tp, regs);

        /* Print the dump four words per line, skipping all-zero lines
         * (the buffer was zeroed, so unread ranges stay silent).
         */
        for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
                if (!regs[i + 0] && !regs[i + 1] &&
                    !regs[i + 2] && !regs[i + 3])
                        continue;

                netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
                           i * 4,
                           regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
        }

        kfree(regs);

        /* Per-vector software view: hardware status block contents and
         * the driver's NAPI ring indices.
         */
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                /* SW status block */
                netdev_err(tp->dev,
                         "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
                           i,
                           tnapi->hw_status->status,
                           tnapi->hw_status->status_tag,
                           tnapi->hw_status->rx_jumbo_consumer,
                           tnapi->hw_status->rx_consumer,
                           tnapi->hw_status->rx_mini_consumer,
                           tnapi->hw_status->idx[0].rx_producer,
                           tnapi->hw_status->idx[0].tx_consumer);

                netdev_err(tp->dev,
                "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
                           i,
                           tnapi->last_tag, tnapi->last_irq_tag,
                           tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
                           tnapi->rx_rcb_ptr,
                           tnapi->prodring.rx_std_prod_idx,
                           tnapi->prodring.rx_std_cons_idx,
                           tnapi->prodring.rx_jmb_prod_idx,
                           tnapi->prodring.rx_jmb_cons_idx);
        }
}
5320
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
        /* Recovery only makes sense if the reorder workaround is not
         * already active and mailbox writes are direct; anything else
         * here indicates a driver logic error.
         */
        BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        netdev_warn(tp->dev,
                    "The system may be re-ordering memory-mapped I/O "
                    "cycles to the network device, attempting to recover. "
                    "Please report the problem to the driver maintainer "
                    "and include system chipset information.\n");

        /* Flag the pending recovery under tp->lock; the actual chip
         * reset happens later in the workqueue.
         */
        spin_lock(&tp->lock);
        tg3_flag_set(tp, TX_RECOVERY_PENDING);
        spin_unlock(&tp->lock);
}
5342
5343 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5344 {
5345         /* Tell compiler to fetch tx indices from memory. */
5346         barrier();
5347         return tnapi->tx_pending -
5348                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5349 }
5350
5351 /* Tigon3 never reports partial packet sends.  So we do not
5352  * need special logic to handle SKBs that have not had all
5353  * of their frags sent yet, like SunGEM does.
5354  */
static void tg3_tx(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        /* Hardware's consumer index from the status block vs. our
         * software consumer index; everything in between is complete.
         */
        u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tnapi->tx_cons;
        struct netdev_queue *txq;
        int index = tnapi - tp->napi;

        /* With TSS, napi[0] carries no TX queue, so queue numbering is
         * shifted down by one relative to the napi index.
         */
        if (tg3_flag(tp, ENABLE_TSS))
                index--;

        txq = netdev_get_tx_queue(tp->dev, index);

        while (sw_idx != hw_idx) {
                struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;

                /* A completed slot with no skb means the completion is
                 * bogus -- likely MMIO reordering; trigger recovery.
                 */
                if (unlikely(skb == NULL)) {
                        tg3_tx_recover(tp);
                        return;
                }

                /* Unmap the head portion of the packet. */
                pci_unmap_single(tp->pdev,
                                 dma_unmap_addr(ri, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);

                ri->skb = NULL;

                /* Skip over continuation descriptors belonging to a
                 * split (fragmented) mapping.
                 */
                while (ri->fragmented) {
                        ri->fragmented = false;
                        sw_idx = NEXT_TX(sw_idx);
                        ri = &tnapi->tx_buffers[sw_idx];
                }

                sw_idx = NEXT_TX(sw_idx);

                /* Unmap each page fragment of the skb, again skipping
                 * any continuation descriptors.
                 */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        ri = &tnapi->tx_buffers[sw_idx];
                        /* A live skb here, or running past the hardware
                         * index, indicates inconsistent completions.
                         */
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;

                        pci_unmap_page(tp->pdev,
                                       dma_unmap_addr(ri, mapping),
                                       skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                       PCI_DMA_TODEVICE);

                        while (ri->fragmented) {
                                ri->fragmented = false;
                                sw_idx = NEXT_TX(sw_idx);
                                ri = &tnapi->tx_buffers[sw_idx];
                        }

                        sw_idx = NEXT_TX(sw_idx);
                }

                dev_kfree_skb(skb);

                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
                        return;
                }
        }

        tnapi->tx_cons = sw_idx;

        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* Wake the queue if it was stopped and we freed enough slots;
         * re-check under the tx lock to avoid racing with xmit.
         */
        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}
5438
5439 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5440 {
5441         if (!ri->skb)
5442                 return;
5443
5444         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5445                          map_sz, PCI_DMA_FROMDEVICE);
5446         dev_kfree_skb_any(ri->skb);
5447         ri->skb = NULL;
5448 }
5449
5450 /* Returns size of skb allocated or < 0 on error.
5451  *
5452  * We only need to fill in the address because the other members
5453  * of the RX descriptor are invariant, see tg3_init_rings.
5454  *
5455  * Note the purposeful assymetry of cpu vs. chip accesses.  For
5456  * posting buffers we only dirty the first cache line of the RX
5457  * descriptor (containing the address).  Whereas for the RX status
5458  * buffers the cpu only reads the last cacheline of the RX descriptor
5459  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5460  */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
                            u32 opaque_key, u32 dest_idx_unmasked)
{
        struct tg3_rx_buffer_desc *desc;
        struct ring_info *map;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int skb_size, dest_idx;

        /* Pick the descriptor, ring-info slot, and buffer size for the
         * ring identified by the opaque key.
         */
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
                desc = &tpr->rx_std[dest_idx];
                map = &tpr->rx_std_buffers[dest_idx];
                skb_size = tp->rx_pkt_map_sz;
                break;

        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
                desc = &tpr->rx_jmb[dest_idx].std;
                map = &tpr->rx_jmb_buffers[dest_idx];
                skb_size = TG3_RX_JMB_MAP_SZ;
                break;

        default:
                return -EINVAL;
        }

        /* Do not overwrite any of the map or rp information
         * until we are sure we can commit to a new buffer.
         *
         * Callers depend upon this behavior and assume that
         * we leave everything unchanged if we fail.
         */
        skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
        if (skb == NULL)
                return -ENOMEM;

        /* Reserve headroom per the chip's required RX data offset. */
        skb_reserve(skb, TG3_RX_OFFSET(tp));

        mapping = pci_map_single(tp->pdev, skb->data, skb_size,
                                 PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        /* Commit: record the skb and DMA address, then publish the
         * address to the hardware descriptor.
         */
        map->skb = skb;
        dma_unmap_addr_set(map, mapping, mapping);

        desc->addr_hi = ((u64)mapping >> 32);
        desc->addr_lo = ((u64)mapping & 0xffffffff);

        return skb_size;
}
5516
5517 /* We only need to move over in the address because the other
5518  * members of the RX descriptor are invariant.  See notes above
5519  * tg3_alloc_rx_skb for full details.
5520  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
                           struct tg3_rx_prodring_set *dpr,
                           u32 opaque_key, int src_idx,
                           u32 dest_idx_unmasked)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_rx_buffer_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        /* Source is always napi[0]'s producer ring set. */
        struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
        int dest_idx;

        /* Resolve source/destination descriptor and ring-info slots for
         * the ring identified by the opaque key.
         */
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
                dest_desc = &dpr->rx_std[dest_idx];
                dest_map = &dpr->rx_std_buffers[dest_idx];
                src_desc = &spr->rx_std[src_idx];
                src_map = &spr->rx_std_buffers[src_idx];
                break;

        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
                dest_desc = &dpr->rx_jmb[dest_idx].std;
                dest_map = &dpr->rx_jmb_buffers[dest_idx];
                src_desc = &spr->rx_jmb[src_idx].std;
                src_map = &spr->rx_jmb_buffers[src_idx];
                break;

        default:
                return;
        }

        /* Transfer skb ownership and the DMA address to the new slot. */
        dest_map->skb = src_map->skb;
        dma_unmap_addr_set(dest_map, mapping,
                           dma_unmap_addr(src_map, mapping));
        dest_desc->addr_hi = src_desc->addr_hi;
        dest_desc->addr_lo = src_desc->addr_lo;

        /* Ensure that the update to the skb happens after the physical
         * addresses have been transferred to the new BD location.
         */
        smp_wmb();

        src_map->skb = NULL;
}
5566
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	/* Consume return-ring entries until we catch up with the hardware
	 * producer index or exhaust the NAPI budget.
	 */
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring (and
		 * which slot within it) this buffer originally came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames (except the odd-nibble MII case),
		 * recycling the buffer back onto the producer ring.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		/* Frame length excludes the 4-byte FCS. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Large packet: hand the existing buffer up the
			 * stack and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy it into a new skb and keep
			 * the original DMA buffer on the producer ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip reports a
		 * complete TCP/UDP checksum of 0xffff.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Discard oversized non-VLAN frames. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about newly posted standard
		 * buffers so it never runs dry mid-burst.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, vector 1 pushes producer updates to the chip;
		 * kick it if we are running on a different vector.
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5773
5774 static void tg3_poll_link(struct tg3 *tp)
5775 {
5776         /* handle link change and other phy events */
5777         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5778                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5779
5780                 if (sblk->status & SD_STATUS_LINK_CHG) {
5781                         sblk->status = SD_STATUS_UPDATED |
5782                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5783                         spin_lock(&tp->lock);
5784                         if (tg3_flag(tp, USE_PHYLIB)) {
5785                                 tw32_f(MAC_STATUS,
5786                                      (MAC_STATUS_SYNC_CHANGED |
5787                                       MAC_STATUS_CFG_CHANGED |
5788                                       MAC_STATUS_MI_COMPLETION |
5789                                       MAC_STATUS_LNKSTATE_CHANGED));
5790                                 udelay(40);
5791                         } else
5792                                 tg3_setup_phy(tp, 0);
5793                         spin_unlock(&tp->lock);
5794                 }
5795         }
5796 }
5797
/* Transfer posted RX buffers from the source producer ring set @spr to
 * the destination set @dpr, for both the standard and jumbo rings.
 * Buffer-info entries and BD addresses are copied and the consumer /
 * producer indices advanced accordingly.  Returns 0 on success, or
 * -ENOSPC if a destination slot was found still occupied (the transfer
 * stops at that slot).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* First move over the standard-ring buffers. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Number of contiguous entries available without wrapping. */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Truncate the copy at the first occupied destination slot. */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Then do the same for the jumbo ring. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5923
/* Per-vector NAPI work: reap completed TX descriptors, then run RX
 * processing within the remaining @budget.  On RSS configurations,
 * vector 1 additionally drains the per-vector producer rings into the
 * vector-0 ring set and publishes the new producer indices to the chip.
 * Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* This vector has no RX return ring to service. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		/* Collect recycled buffers from every other vector's ring. */
		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		/* Only touch the mailboxes if the indices actually moved. */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer error leaves work pending; force coalescing
		 * so we get called again.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5973
5974 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5975 {
5976         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5977                 schedule_work(&tp->reset_task);
5978 }
5979
/* Wait for any queued reset task to finish, then clear the pending
 * flag so a future reset can be scheduled again.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
5985
/* NAPI poll handler for MSI-X vectors other than vector 0.  These
 * vectors carry only RX/TX work; link and chip-error events are
 * handled by vector 0 in tg3_poll().  Returns the work done, which is
 * <= @budget unless TX recovery aborts the loop.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6029
6030 static void tg3_process_error(struct tg3 *tp)
6031 {
6032         u32 val;
6033         bool real_error = false;
6034
6035         if (tg3_flag(tp, ERROR_PROCESSED))
6036                 return;
6037
6038         /* Check Flow Attention register */
6039         val = tr32(HOSTCC_FLOW_ATTN);
6040         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6041                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6042                 real_error = true;
6043         }
6044
6045         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6046                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6047                 real_error = true;
6048         }
6049
6050         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6051                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6052                 real_error = true;
6053         }
6054
6055         if (!real_error)
6056                 return;
6057
6058         tg3_dump_state(tp);
6059
6060         tg3_flag_set(tp, ERROR_PROCESSED);
6061         tg3_reset_task_schedule(tp);
6062 }
6063
/* NAPI poll handler for the default vector (vector 0).  In addition to
 * the RX/TX work done by tg3_poll_work(), this vector also checks for
 * chip error status and link-change events on every pass.  Returns the
 * work done, which is <= @budget unless TX recovery aborts the loop.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6111
6112 static void tg3_napi_disable(struct tg3 *tp)
6113 {
6114         int i;
6115
6116         for (i = tp->irq_cnt - 1; i >= 0; i--)
6117                 napi_disable(&tp->napi[i].napi);
6118 }
6119
6120 static void tg3_napi_enable(struct tg3 *tp)
6121 {
6122         int i;
6123
6124         for (i = 0; i < tp->irq_cnt; i++)
6125                 napi_enable(&tp->napi[i].napi);
6126 }
6127
6128 static void tg3_napi_init(struct tg3 *tp)
6129 {
6130         int i;
6131
6132         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6133         for (i = 1; i < tp->irq_cnt; i++)
6134                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6135 }
6136
6137 static void tg3_napi_fini(struct tg3 *tp)
6138 {
6139         int i;
6140
6141         for (i = 0; i < tp->irq_cnt; i++)
6142                 netif_napi_del(&tp->napi[i].napi);
6143 }
6144
/* Quiesce the data path: stop NAPI polling and all TX queues.  The
 * trans_start timestamp is refreshed first so the stack's TX watchdog
 * does not fire while the device is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
6151
/* Restart the data path after tg3_netif_stop(): wake the TX queues,
 * re-enable NAPI, mark the status block updated and unmask interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated before re-enabling interrupts. */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
6164
/* Set irq_sync (checked by the interrupt handlers via tg3_irq_sync())
 * and wait for any handler already running on another CPU to complete.
 * Must not be called while a previous quiesce is still in effect.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make the irq_sync store visible before waiting on the vectors. */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
6177
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);	/* also wait out in-flight IRQ handlers */
}
6189
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
6194
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and the next RX entry. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling while tg3_irq_quiesce() has IRQs synced off. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
6212
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and the next RX entry. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	/* Skip scheduling while tg3_irq_quiesce() has IRQs synced off. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
6238
/* INTx interrupt handler (non-tagged status block variant).  May be
 * called for interrupts raised by other devices sharing the line;
 * returns IRQ_NONE (handled == 0) when the interrupt is not ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
6287
/* INTx interrupt handler for chips using tagged status blocks.  A
 * status_tag equal to last_irq_tag means no new status has been posted
 * since the last interrupt, i.e. the interrupt was not (or is no
 * longer) ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
6339
6340 /* ISR for interrupt test */
6341 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6342 {
6343         struct tg3_napi *tnapi = dev_id;
6344         struct tg3 *tp = tnapi->tp;
6345         struct tg3_hw_status *sblk = tnapi->hw_status;
6346
6347         if ((sblk->status & SD_STATUS_UPDATED) ||
6348             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6349                 tg3_disable_ints(tp);
6350                 return IRQ_RETVAL(1);
6351         }
6352         return IRQ_RETVAL(0);
6353 }
6354
6355 static int tg3_init_hw(struct tg3 *, int);
6356 static int tg3_halt(struct tg3 *, int, int);
6357
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device must not be left half-initialized, so it is
 * torn all the way down: halt the chip, stop the timer, re-enable
 * NAPI and close the netdev.  The lock is dropped around dev_close()
 * and reacquired before returning, as the sparse annotations state.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		/* irq_sync must be cleared and NAPI re-enabled before
		 * dev_close() so its teardown path sees a sane state.
		 */
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
6381
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with normal interrupt delivery unavailable, invoke the
 * ISR by hand for every vector so netconsole and co. make progress.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int vec;

	if (tg3_irq_sync(tp))
		return;

	for (vec = 0; vec < tp->irq_cnt; vec++) {
		struct tg3_napi *tnapi = &tp->napi[vec];

		tg3_interrupt(tnapi->irq_vec, tnapi);
	}
}
#endif
6395
/* Workqueue handler that performs a full chip reset from process
 * context (scheduled via tg3_reset_task_schedule(), e.g. on tx
 * timeout).  Clears RESET_TASK_PENDING on every exit path.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* NOTE(review): tg3_phy_stop() is deliberately called outside
	 * the lock — presumably it can sleep; confirm before reordering.
	 */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* A tx recovery was requested: switch the mailbox write
		 * methods to the flushing variants and record that write
		 * reordering must be assumed from now on.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if re-init succeeded */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
6439
6440 static void tg3_tx_timeout(struct net_device *dev)
6441 {
6442         struct tg3 *tp = netdev_priv(dev);
6443
6444         if (netif_msg_tx_err(tp)) {
6445                 netdev_err(dev, "transmit timed out, resetting\n");
6446                 tg3_dump_state(tp);
6447         }
6448
6449         tg3_reset_task_schedule(tp);
6450 }
6451
6452 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6453 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6454 {
6455         u32 base = (u32) mapping & 0xffffffff;
6456
6457         return (base > 0xffffdcc0) && (base + len + 8 < base);
6458 }
6459
6460 /* Test for DMA addresses > 40-bit */
6461 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6462                                           int len)
6463 {
6464 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6465         if (tg3_flag(tp, 40BIT_DMA_BUG))
6466                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6467         return 0;
6468 #else
6469         return 0;
6470 #endif
6471 }
6472
6473 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6474                                  dma_addr_t mapping, u32 len, u32 flags,
6475                                  u32 mss, u32 vlan)
6476 {
6477         txbd->addr_hi = ((u64) mapping >> 32);
6478         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6479         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6480         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6481 }
6482
6483 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6484                             dma_addr_t map, u32 len, u32 flags,
6485                             u32 mss, u32 vlan)
6486 {
6487         struct tg3 *tp = tnapi->tp;
6488         bool hwbug = false;
6489
6490         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6491                 hwbug = 1;
6492
6493         if (tg3_4g_overflow_test(map, len))
6494                 hwbug = 1;
6495
6496         if (tg3_40bit_overflow_test(tp, map, len))
6497                 hwbug = 1;
6498
6499         if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6500                 u32 prvidx = *entry;
6501                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6502                 while (len > TG3_TX_BD_DMA_MAX && *budget) {
6503                         u32 frag_len = TG3_TX_BD_DMA_MAX;
6504                         len -= TG3_TX_BD_DMA_MAX;
6505
6506                         /* Avoid the 8byte DMA problem */
6507                         if (len <= 8) {
6508                                 len += TG3_TX_BD_DMA_MAX / 2;
6509                                 frag_len = TG3_TX_BD_DMA_MAX / 2;
6510                         }
6511
6512                         tnapi->tx_buffers[*entry].fragmented = true;
6513
6514                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6515                                       frag_len, tmp_flag, mss, vlan);
6516                         *budget -= 1;
6517                         prvidx = *entry;
6518                         *entry = NEXT_TX(*entry);
6519
6520                         map += frag_len;
6521                 }
6522
6523                 if (len) {
6524                         if (*budget) {
6525                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6526                                               len, flags, mss, vlan);
6527                                 *budget -= 1;
6528                                 *entry = NEXT_TX(*entry);
6529                         } else {
6530                                 hwbug = 1;
6531                                 tnapi->tx_buffers[prvidx].fragmented = false;
6532                         }
6533                 }
6534         } else {
6535                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6536                               len, flags, mss, vlan);
6537                 *entry = NEXT_TX(*entry);
6538         }
6539
6540         return hwbug;
6541 }
6542
/* Unmap and forget the skb queued starting at tx ring slot @entry.
 * @last is the index of the final skb fragment to unmap (-1 when only
 * the linear head was mapped).  Slots whose ->fragmented flag is set
 * were produced by tg3_tx_frag_set() splitting one DMA segment over
 * several BDs; they share the segment's mapping and are skipped.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* The first slot always holds the linear portion of the skb */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip over any split-segment continuation BDs */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
6580
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies the skb into a new linear buffer in the hope that the fresh
 * mapping lands somewhere the chip can address, then queues it with
 * tg3_tx_frag_set().  The original skb is always consumed.  Returns 0
 * on success, -1 on failure.
 *
 * NOTE(review): on the failure paths *pskb is still assigned new_skb,
 * which is NULL (alloc failure) or already freed (map/queue failure).
 * The caller drops the packet on non-zero return so this appears
 * harmless today, but the dangling assignment is worth confirming.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701: grow headroom so the copied data can sit on a
		 * 4-byte boundary — presumably an alignment erratum.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			/* Single linear buffer => this BD ends the packet */
			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Still hit a DMA bug: unwind and give up */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
6635
6636 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6637
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments the skb in software and transmits each resulting packet
 * individually through tg3_start_xmit().  Always consumes @skb.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment with TSO masked off so each piece goes out as a
	 * plain (non-TSO) packet.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	/* The original skb is no longer needed (or segmentation failed) */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
6678
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* NOTE(review): with TSS the tx tnapi is offset by one from the
	 * queue index — presumably vector 0 is not used for tx; confirm.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		/* We must own the header before editing it below */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		if (skb_is_gso_v6(skb)) {
			hdr_len = skb_headlen(skb) - ETH_HLEN;
		} else {
			u32 ip_tcp_len;

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
			hdr_len = ip_tcp_len + tcp_opt_len;

			/* Prime the IP header for per-segment rewrite:
			 * checksum cleared, tot_len set to segment size.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Oversized TSO headers go through the GSO fallback */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Each HW TSO generation encodes the header length into
		 * the mss/base_flags fields differently.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	/* Now loop through additional data fragments, and queue them. */
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		/* Only HW TSO chips want the mss repeated on every BD */
		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Unwind everything queued so far, then retry via the
		 * linearizing copy workaround.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Unwind the head mapping and fragments 0..i-1 (frag i failed) */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
6894
6895 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6896 {
6897         if (enable) {
6898                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6899                                   MAC_MODE_PORT_MODE_MASK);
6900
6901                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6902
6903                 if (!tg3_flag(tp, 5705_PLUS))
6904                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6905
6906                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6907                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6908                 else
6909                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6910         } else {
6911                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6912
6913                 if (tg3_flag(tp, 5705_PLUS) ||
6914                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6915                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6916                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6917         }
6918
6919         tw32(MAC_MODE, tp->mac_mode);
6920         udelay(40);
6921 }
6922
/* Put the PHY into loopback at the requested speed, either internal
 * (BMCR loopback bit) or external (@extlpbk), and program the MAC port
 * mode to match.  Returns 0 on success, -EIO if external loopback
 * setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Build the BMCR value: full duplex at the requested speed */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs are clamped to 100Mbit here */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force 1000BT master mode for external loopback */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Select the MAC port mode matching the loopback speed */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* Link polarity on 5700 depends on the exact PHY model */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
7015
7016 static void tg3_set_loopback(struct net_device *dev, u32 features)
7017 {
7018         struct tg3 *tp = netdev_priv(dev);
7019
7020         if (features & NETIF_F_LOOPBACK) {
7021                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7022                         return;
7023
7024                 spin_lock_bh(&tp->lock);
7025                 tg3_mac_loopback(tp, true);
7026                 netif_carrier_on(tp->dev);
7027                 spin_unlock_bh(&tp->lock);
7028                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7029         } else {
7030                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7031                         return;
7032
7033                 spin_lock_bh(&tp->lock);
7034                 tg3_mac_loopback(tp, false);
7035                 /* Force link status check */
7036                 tg3_setup_phy(tp, 1);
7037                 spin_unlock_bh(&tp->lock);
7038                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7039         }
7040 }
7041
7042 static u32 tg3_fix_features(struct net_device *dev, u32 features)
7043 {
7044         struct tg3 *tp = netdev_priv(dev);
7045
7046         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7047                 features &= ~NETIF_F_ALL_TSO;
7048
7049         return features;
7050 }
7051
7052 static int tg3_set_features(struct net_device *dev, u32 features)
7053 {
7054         u32 changed = dev->features ^ features;
7055
7056         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7057                 tg3_set_loopback(dev, features);
7058
7059         return 0;
7060 }
7061
/* Record a new MTU and adjust the jumbo/TSO related flags.  On
 * 5780-class chips jumbo frames and TSO are mutually exclusive (see
 * tg3_fix_features), so TSO_CAPABLE is toggled instead of the jumbo
 * ring enable.
 */
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	/* Must be set first: tg3_fix_features() (run inside
	 * netdev_update_features() below) keys off dev->mtu.
	 */
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
7082
/* ndo_change_mtu handler.  Validates the requested MTU; if the device
 * is running, performs a full halt / reconfigure / restart cycle so
 * the rx rings are rebuilt for the new frame size.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* tg3_restart_hw() may drop and retake tp->lock on failure */
	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
7121
/* Release all rx skbs still held by a producer ring set.  The primary
 * set (napi[0]'s) is swept slot by slot; any other set only frees the
 * window between its consumer and producer indices.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Primary ring set: every slot may hold a buffer */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
7155
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even the first buffer of a
 * ring could be allocated; partially-filled rings are shrunk instead
 * of failing (see tp->rx_pending / tp->rx_jumbo_pending updates).
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	/* Reset the ring indices to an empty state. */
	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		/* Secondary (per-vector) ring sets carry no descriptor
		 * rings of their own; just clear the shadow buffer arrays.
		 */
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class parts with a large MTU use bigger standard-ring DMA
	 * buffers rather than the dedicated jumbo ring (which is skipped
	 * for 5780_CLASS below).
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			/* Shrink the ring to what was actually filled. */
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	/* Descriptor invariants are set even when the jumbo ring is not
	 * currently enabled; buffers are only allocated when it is.
	 */
	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	/* Could not populate even the first slot; release everything. */
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7258
7259 static void tg3_rx_prodring_fini(struct tg3 *tp,
7260                                  struct tg3_rx_prodring_set *tpr)
7261 {
7262         kfree(tpr->rx_std_buffers);
7263         tpr->rx_std_buffers = NULL;
7264         kfree(tpr->rx_jmb_buffers);
7265         tpr->rx_jmb_buffers = NULL;
7266         if (tpr->rx_std) {
7267                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7268                                   tpr->rx_std, tpr->rx_std_mapping);
7269                 tpr->rx_std = NULL;
7270         }
7271         if (tpr->rx_jmb) {
7272                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7273                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7274                 tpr->rx_jmb = NULL;
7275         }
7276 }
7277
7278 static int tg3_rx_prodring_init(struct tg3 *tp,
7279                                 struct tg3_rx_prodring_set *tpr)
7280 {
7281         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7282                                       GFP_KERNEL);
7283         if (!tpr->rx_std_buffers)
7284                 return -ENOMEM;
7285
7286         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7287                                          TG3_RX_STD_RING_BYTES(tp),
7288                                          &tpr->rx_std_mapping,
7289                                          GFP_KERNEL);
7290         if (!tpr->rx_std)
7291                 goto err_out;
7292
7293         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7294                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7295                                               GFP_KERNEL);
7296                 if (!tpr->rx_jmb_buffers)
7297                         goto err_out;
7298
7299                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7300                                                  TG3_RX_JMB_RING_BYTES(tp),
7301                                                  &tpr->rx_jmb_mapping,
7302                                                  GFP_KERNEL);
7303                 if (!tpr->rx_jmb)
7304                         goto err_out;
7305         }
7306
7307         return 0;
7308
7309 err_out:
7310         tg3_rx_prodring_fini(tp, tpr);
7311         return -ENOMEM;
7312 }
7313
7314 /* Free up pending packets in all rx/tx rings.
7315  *
7316  * The chip has been shut down and the driver detached from
7317  * the networking, so no interrupts or new tx packets will
7318  * end up in the driver.  tp->{tx,}lock is not held and we are not
7319  * in an interrupt context and thus may sleep.
7320  */
7321 static void tg3_free_rings(struct tg3 *tp)
7322 {
7323         int i, j;
7324
7325         for (j = 0; j < tp->irq_cnt; j++) {
7326                 struct tg3_napi *tnapi = &tp->napi[j];
7327
7328                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7329
7330                 if (!tnapi->tx_buffers)
7331                         continue;
7332
7333                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7334                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7335
7336                         if (!skb)
7337                                 continue;
7338
7339                         tg3_tx_skb_unmap(tnapi, i,
7340                                          skb_shinfo(skb)->nr_frags - 1);
7341
7342                         dev_kfree_skb_any(skb);
7343                 }
7344         }
7345 }
7346
7347 /* Initialize tx/rx rings for packet processing.
7348  *
7349  * The chip has been shut down and the driver detached from
7350  * the networking, so no interrupts or new tx packets will
7351  * end up in the driver.  tp->{tx,}lock are held and thus
7352  * we may not sleep.
7353  */
7354 static int tg3_init_rings(struct tg3 *tp)
7355 {
7356         int i;
7357
7358         /* Free up all the SKBs. */
7359         tg3_free_rings(tp);
7360
7361         for (i = 0; i < tp->irq_cnt; i++) {
7362                 struct tg3_napi *tnapi = &tp->napi[i];
7363
7364                 tnapi->last_tag = 0;
7365                 tnapi->last_irq_tag = 0;
7366                 tnapi->hw_status->status = 0;
7367                 tnapi->hw_status->status_tag = 0;
7368                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7369
7370                 tnapi->tx_prod = 0;
7371                 tnapi->tx_cons = 0;
7372                 if (tnapi->tx_ring)
7373                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7374
7375                 tnapi->rx_rcb_ptr = 0;
7376                 if (tnapi->rx_rcb)
7377                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7378
7379                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7380                         tg3_free_rings(tp);
7381                         return -ENOMEM;
7382                 }
7383         }
7384
7385         return 0;
7386 }
7387
7388 /*
7389  * Must not be invoked with interrupt sources disabled and
7390  * the hardware shutdown down.
7391  */
7392 static void tg3_free_consistent(struct tg3 *tp)
7393 {
7394         int i;
7395
7396         for (i = 0; i < tp->irq_cnt; i++) {
7397                 struct tg3_napi *tnapi = &tp->napi[i];
7398
7399                 if (tnapi->tx_ring) {
7400                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7401                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7402                         tnapi->tx_ring = NULL;
7403                 }
7404
7405                 kfree(tnapi->tx_buffers);
7406                 tnapi->tx_buffers = NULL;
7407
7408                 if (tnapi->rx_rcb) {
7409                         dma_free_coherent(&tp->pdev->dev,
7410                                           TG3_RX_RCB_RING_BYTES(tp),
7411                                           tnapi->rx_rcb,
7412                                           tnapi->rx_rcb_mapping);
7413                         tnapi->rx_rcb = NULL;
7414                 }
7415
7416                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7417
7418                 if (tnapi->hw_status) {
7419                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7420                                           tnapi->hw_status,
7421                                           tnapi->status_mapping);
7422                         tnapi->hw_status = NULL;
7423                 }
7424         }
7425
7426         if (tp->hw_stats) {
7427                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7428                                   tp->hw_stats, tp->stats_mapping);
7429                 tp->hw_stats = NULL;
7430         }
7431 }
7432
7433 /*
7434  * Must not be invoked with interrupt sources disabled and
7435  * the hardware shutdown down.  Can sleep.
7436  */
7437 static int tg3_alloc_consistent(struct tg3 *tp)
7438 {
7439         int i;
7440
7441         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7442                                           sizeof(struct tg3_hw_stats),
7443                                           &tp->stats_mapping,
7444                                           GFP_KERNEL);
7445         if (!tp->hw_stats)
7446                 goto err_out;
7447
7448         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7449
7450         for (i = 0; i < tp->irq_cnt; i++) {
7451                 struct tg3_napi *tnapi = &tp->napi[i];
7452                 struct tg3_hw_status *sblk;
7453
7454                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7455                                                       TG3_HW_STATUS_SIZE,
7456                                                       &tnapi->status_mapping,
7457                                                       GFP_KERNEL);
7458                 if (!tnapi->hw_status)
7459                         goto err_out;
7460
7461                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7462                 sblk = tnapi->hw_status;
7463
7464                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7465                         goto err_out;
7466
7467                 /* If multivector TSS is enabled, vector 0 does not handle
7468                  * tx interrupts.  Don't allocate any resources for it.
7469                  */
7470                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7471                     (i && tg3_flag(tp, ENABLE_TSS))) {
7472                         tnapi->tx_buffers = kzalloc(
7473                                                sizeof(struct tg3_tx_ring_info) *
7474                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7475                         if (!tnapi->tx_buffers)
7476                                 goto err_out;
7477
7478                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7479                                                             TG3_TX_RING_BYTES,
7480                                                         &tnapi->tx_desc_mapping,
7481                                                             GFP_KERNEL);
7482                         if (!tnapi->tx_ring)
7483                                 goto err_out;
7484                 }
7485
7486                 /*
7487                  * When RSS is enabled, the status block format changes
7488                  * slightly.  The "rx_jumbo_consumer", "reserved",
7489                  * and "rx_mini_consumer" members get mapped to the
7490                  * other three rx return ring producer indexes.
7491                  */
7492                 switch (i) {
7493                 default:
7494                         if (tg3_flag(tp, ENABLE_RSS)) {
7495                                 tnapi->rx_rcb_prod_idx = NULL;
7496                                 break;
7497                         }
7498                         /* Fall through */
7499                 case 1:
7500                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7501                         break;
7502                 case 2:
7503                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7504                         break;
7505                 case 3:
7506                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7507                         break;
7508                 case 4:
7509                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7510                         break;
7511                 }
7512
7513                 /*
7514                  * If multivector RSS is enabled, vector 0 does not handle
7515                  * rx or tx interrupts.  Don't allocate any resources for it.
7516                  */
7517                 if (!i && tg3_flag(tp, ENABLE_RSS))
7518                         continue;
7519
7520                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7521                                                    TG3_RX_RCB_RING_BYTES(tp),
7522                                                    &tnapi->rx_rcb_mapping,
7523                                                    GFP_KERNEL);
7524                 if (!tnapi->rx_rcb)
7525                         goto err_out;
7526
7527                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7528         }
7529
7530         return 0;
7531
7532 err_out:
7533         tg3_free_consistent(tp);
7534         return -ENOMEM;
7535 }
7536
7537 #define MAX_WAIT_CNT 1000
7538
7539 /* To stop a block, clear the enable bit and poll till it
7540  * clears.  tp->lock is held.
7541  */
7542 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7543 {
7544         unsigned int i;
7545         u32 val;
7546
7547         if (tg3_flag(tp, 5705_PLUS)) {
7548                 switch (ofs) {
7549                 case RCVLSC_MODE:
7550                 case DMAC_MODE:
7551                 case MBFREE_MODE:
7552                 case BUFMGR_MODE:
7553                 case MEMARB_MODE:
7554                         /* We can't enable/disable these bits of the
7555                          * 5705/5750, just say success.
7556                          */
7557                         return 0;
7558
7559                 default:
7560                         break;
7561                 }
7562         }
7563
7564         val = tr32(ofs);
7565         val &= ~enable_bit;
7566         tw32_f(ofs, val);
7567
7568         for (i = 0; i < MAX_WAIT_CNT; i++) {
7569                 udelay(100);
7570                 val = tr32(ofs);
7571                 if ((val & enable_bit) == 0)
7572                         break;
7573         }
7574
7575         if (i == MAX_WAIT_CNT && !silent) {
7576                 dev_err(&tp->pdev->dev,
7577                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7578                         ofs, enable_bit);
7579                 return -ENODEV;
7580         }
7581
7582         return 0;
7583 }
7584
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, stop the RX MAC, then walk the
 * receive- and transmit-path DMA/buffer blocks clearing their enable
 * bits, stop the TX MAC, and finally clear the status and statistics
 * blocks.  Per-block errors are OR-ed into the return value; with
 * @silent set, tg3_stop_block() suppresses timeout reporting.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the receive MAC first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-path blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Transmit-path blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the transmit MAC and poll for the enable bit to clear. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear status and statistics blocks so stale data is not
	 * consumed after the hardware comes back up.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
7650
/* Save PCI command register before chip reset.
 *
 * GRC_MISC_CFG core clock reset can clear the memory enable bit in
 * PCI register 4 (PCI_COMMAND) on some chips, so the word is cached
 * here and written back by tg3_restore_pci_state().
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7656
/* Restore PCI state after chip reset.
 *
 * Re-enables indirect register accesses, rewrites the PCI_COMMAND word
 * cached by tg3_save_pci_state(), and redoes the PCI/PCIe/PCI-X tuning
 * (read request size, cacheline size, latency timer, relaxed ordering,
 * MSI enable) that the core-clock reset wipes out.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the saved PCI command word (e.g. memory enable). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
7721
7722 /* tp->lock is held. */
7723 static int tg3_chip_reset(struct tg3 *tp)
7724 {
7725         u32 val;
7726         void (*write_op)(struct tg3 *, u32, u32);
7727         int i, err;
7728
7729         tg3_nvram_lock(tp);
7730
7731         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7732
7733         /* No matching tg3_nvram_unlock() after this because
7734          * chip reset below will undo the nvram lock.
7735          */
7736         tp->nvram_lock_cnt = 0;
7737
7738         /* GRC_MISC_CFG core clock reset will clear the memory
7739          * enable bit in PCI register 4 and the MSI enable bit
7740          * on some chips, so we save relevant registers here.
7741          */
7742         tg3_save_pci_state(tp);
7743
7744         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7745             tg3_flag(tp, 5755_PLUS))
7746                 tw32(GRC_FASTBOOT_PC, 0);
7747
7748         /*
7749          * We must avoid the readl() that normally takes place.
7750          * It locks machines, causes machine checks, and other
7751          * fun things.  So, temporarily disable the 5701
7752          * hardware workaround, while we do the reset.
7753          */
7754         write_op = tp->write32;
7755         if (write_op == tg3_write_flush_reg32)
7756                 tp->write32 = tg3_write32;
7757
7758         /* Prevent the irq handler from reading or writing PCI registers
7759          * during chip reset when the memory enable bit in the PCI command
7760          * register may be cleared.  The chip does not generate interrupt
7761          * at this time, but the irq handler may still be called due to irq
7762          * sharing or irqpoll.
7763          */
7764         tg3_flag_set(tp, CHIP_RESETTING);
7765         for (i = 0; i < tp->irq_cnt; i++) {
7766                 struct tg3_napi *tnapi = &tp->napi[i];
7767                 if (tnapi->hw_status) {
7768                         tnapi->hw_status->status = 0;
7769                         tnapi->hw_status->status_tag = 0;
7770                 }
7771                 tnapi->last_tag = 0;
7772                 tnapi->last_irq_tag = 0;
7773         }
7774         smp_mb();
7775
7776         for (i = 0; i < tp->irq_cnt; i++)
7777                 synchronize_irq(tp->napi[i].irq_vec);
7778
7779         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7780                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7781                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7782         }
7783
7784         /* do the reset */
7785         val = GRC_MISC_CFG_CORECLK_RESET;
7786
7787         if (tg3_flag(tp, PCI_EXPRESS)) {
7788                 /* Force PCIe 1.0a mode */
7789                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7790                     !tg3_flag(tp, 57765_PLUS) &&
7791                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7792                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7793                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7794
7795                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7796                         tw32(GRC_MISC_CFG, (1 << 29));
7797                         val |= (1 << 29);
7798                 }
7799         }
7800
7801         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7802                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7803                 tw32(GRC_VCPU_EXT_CTRL,
7804                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7805         }
7806
7807         /* Manage gphy power for all CPMU absent PCIe devices. */
7808         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7809                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7810
7811         tw32(GRC_MISC_CFG, val);
7812
7813         /* restore 5701 hardware bug workaround write method */
7814         tp->write32 = write_op;
7815
7816         /* Unfortunately, we have to delay before the PCI read back.
7817          * Some 575X chips even will not respond to a PCI cfg access
7818          * when the reset command is given to the chip.
7819          *
7820          * How do these hardware designers expect things to work
7821          * properly if the PCI write is posted for a long period
7822          * of time?  It is always necessary to have some method by
7823          * which a register read back can occur to push the write
7824          * out which does the reset.
7825          *
7826          * For most tg3 variants the trick below was working.
7827          * Ho hum...
7828          */
7829         udelay(120);
7830
7831         /* Flush PCI posted writes.  The normal MMIO registers
7832          * are inaccessible at this time so this is the only
7833          * way to make this reliably (actually, this is no longer
7834          * the case, see above).  I tried to use indirect
7835          * register read/write but this upset some 5701 variants.
7836          */
7837         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7838
7839         udelay(120);
7840
7841         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7842                 u16 val16;
7843
7844                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7845                         int i;
7846                         u32 cfg_val;
7847
7848                         /* Wait for link training to complete.  */
7849                         for (i = 0; i < 5000; i++)
7850                                 udelay(100);
7851
7852                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7853                         pci_write_config_dword(tp->pdev, 0xc4,
7854                                                cfg_val | (1 << 15));
7855                 }
7856
7857                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7858                 pci_read_config_word(tp->pdev,
7859                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7860                                      &val16);
7861                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7862                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7863                 /*
7864                  * Older PCIe devices only support the 128 byte
7865                  * MPS setting.  Enforce the restriction.
7866                  */
7867                 if (!tg3_flag(tp, CPMU_PRESENT))
7868                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7869                 pci_write_config_word(tp->pdev,
7870                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7871                                       val16);
7872
7873                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7874
7875                 /* Clear error status */
7876                 pci_write_config_word(tp->pdev,
7877                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7878                                       PCI_EXP_DEVSTA_CED |
7879                                       PCI_EXP_DEVSTA_NFED |
7880                                       PCI_EXP_DEVSTA_FED |
7881                                       PCI_EXP_DEVSTA_URD);
7882         }
7883
7884         tg3_restore_pci_state(tp);
7885
7886         tg3_flag_clear(tp, CHIP_RESETTING);
7887         tg3_flag_clear(tp, ERROR_PROCESSED);
7888
7889         val = 0;
7890         if (tg3_flag(tp, 5780_CLASS))
7891                 val = tr32(MEMARB_MODE);
7892         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7893
7894         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7895                 tg3_stop_fw(tp);
7896                 tw32(0x5000, 0x400);
7897         }
7898
7899         tw32(GRC_MODE, tp->grc_mode);
7900
7901         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7902                 val = tr32(0xc4);
7903
7904                 tw32(0xc4, val | (1 << 15));
7905         }
7906
7907         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7908             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7909                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7910                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7911                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7912                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7913         }
7914
7915         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7916                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7917                 val = tp->mac_mode;
7918         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7919                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7920                 val = tp->mac_mode;
7921         } else
7922                 val = 0;
7923
7924         tw32_f(MAC_MODE, val);
7925         udelay(40);
7926
7927         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7928
7929         err = tg3_poll_fw(tp);
7930         if (err)
7931                 return err;
7932
7933         tg3_mdio_start(tp);
7934
7935         if (tg3_flag(tp, PCI_EXPRESS) &&
7936             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7937             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7938             !tg3_flag(tp, 57765_PLUS)) {
7939                 val = tr32(0x7c00);
7940
7941                 tw32(0x7c00, val | (1 << 25));
7942         }
7943
7944         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7945                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7946                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7947         }
7948
7949         /* Reprobe ASF enable state.  */
7950         tg3_flag_clear(tp, ENABLE_ASF);
7951         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7952         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7953         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7954                 u32 nic_cfg;
7955
7956                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7957                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7958                         tg3_flag_set(tp, ENABLE_ASF);
7959                         tp->last_event_jiffies = jiffies;
7960                         if (tg3_flag(tp, 5750_PLUS))
7961                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7962                 }
7963         }
7964
7965         return 0;
7966 }
7967
/* tp->lock is held.
 *
 * Stop the on-chip firmware, quiesce the hardware and perform a full
 * chip reset, writing the reset-kind signatures before and after so
 * firmware can tell why the reset happened.  The MAC address registers
 * and post-reset signatures are restored even when the reset itself
 * reported an error.
 *
 * @kind:   reset kind passed to the signature helpers.
 * @silent: passed to tg3_abort_hw() to suppress warnings.
 *
 * Returns 0 on success or the error from tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Restore the MAC address registers after the reset. */
	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
7990
7991 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7992 {
7993         struct tg3 *tp = netdev_priv(dev);
7994         struct sockaddr *addr = p;
7995         int err = 0, skip_mac_1 = 0;
7996
7997         if (!is_valid_ether_addr(addr->sa_data))
7998                 return -EINVAL;
7999
8000         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8001
8002         if (!netif_running(dev))
8003                 return 0;
8004
8005         if (tg3_flag(tp, ENABLE_ASF)) {
8006                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8007
8008                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8009                 addr0_low = tr32(MAC_ADDR_0_LOW);
8010                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8011                 addr1_low = tr32(MAC_ADDR_1_LOW);
8012
8013                 /* Skip MAC addr 1 if ASF is using it. */
8014                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8015                     !(addr1_high == 0 && addr1_low == 0))
8016                         skip_mac_1 = 1;
8017         }
8018         spin_lock_bh(&tp->lock);
8019         __tg3_set_mac_addr(tp, skip_mac_1);
8020         spin_unlock_bh(&tp->lock);
8021
8022         return err;
8023 }
8024
8025 /* tp->lock is held. */
8026 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8027                            dma_addr_t mapping, u32 maxlen_flags,
8028                            u32 nic_addr)
8029 {
8030         tg3_write_mem(tp,
8031                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8032                       ((u64) mapping >> 32));
8033         tg3_write_mem(tp,
8034                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8035                       ((u64) mapping & 0xffffffff));
8036         tg3_write_mem(tp,
8037                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8038                        maxlen_flags);
8039
8040         if (!tg3_flag(tp, 5705_PLUS))
8041                 tg3_write_mem(tp,
8042                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8043                               nic_addr);
8044 }
8045
8046 static void __tg3_set_rx_mode(struct net_device *);
8047 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8048 {
8049         int i;
8050
8051         if (!tg3_flag(tp, ENABLE_TSS)) {
8052                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8053                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8054                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8055         } else {
8056                 tw32(HOSTCC_TXCOL_TICKS, 0);
8057                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8058                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8059         }
8060
8061         if (!tg3_flag(tp, ENABLE_RSS)) {
8062                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8063                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8064                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8065         } else {
8066                 tw32(HOSTCC_RXCOL_TICKS, 0);
8067                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8068                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8069         }
8070
8071         if (!tg3_flag(tp, 5705_PLUS)) {
8072                 u32 val = ec->stats_block_coalesce_usecs;
8073
8074                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8075                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8076
8077                 if (!netif_carrier_ok(tp->dev))
8078                         val = 0;
8079
8080                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8081         }
8082
8083         for (i = 0; i < tp->irq_cnt - 1; i++) {
8084                 u32 reg;
8085
8086                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8087                 tw32(reg, ec->rx_coalesce_usecs);
8088                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8089                 tw32(reg, ec->rx_max_coalesced_frames);
8090                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8091                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8092
8093                 if (tg3_flag(tp, ENABLE_TSS)) {
8094                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8095                         tw32(reg, ec->tx_coalesce_usecs);
8096                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8097                         tw32(reg, ec->tx_max_coalesced_frames);
8098                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8099                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8100                 }
8101         }
8102
8103         for (; i < tp->irq_max - 1; i++) {
8104                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8105                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8106                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8107
8108                 if (tg3_flag(tp, ENABLE_TSS)) {
8109                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8110                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8111                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8112                 }
8113         }
8114 }
8115
/* tp->lock is held.
 *
 * Put the NIC ring state back to a known baseline after a chip reset:
 *  - disable every send and receive-return BD info block except the
 *    first (the count of blocks depends on the chip generation),
 *  - mask interrupts and zero the producer/consumer mailboxes and the
 *    per-vector software bookkeeping,
 *  - clear the host status block(s) and reprogram their DMA addresses,
 *  - reprogram the BD info blocks for each active tx / rx-return ring.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first.  The number of send
	 * BD info blocks in NIC SRAM varies per chip: 16 on pre-5705
	 * parts, 4 on 5717_PLUS, 2 on 57765, otherwise 1.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	/* Start one block past NIC_SRAM_SEND_RCB so ring 0 is kept. */
	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts (write 1 to the interrupt mailbox) and
	 * reset vector 0's chk_msi / last-consumer bookkeeping.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the tx producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Reprogram vector 0's tx and rx-return BD info blocks. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	/* Remaining vectors: each has a status block address register
	 * pair (8 bytes apart, starting at HOSTCC_STATBLCK_RING1) plus
	 * its own tx / rx-return BD info blocks.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8243
8244 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8245 {
8246         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8247
8248         if (!tg3_flag(tp, 5750_PLUS) ||
8249             tg3_flag(tp, 5780_CLASS) ||
8250             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8251             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8252                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8253         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8254                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8255                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8256         else
8257                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8258
8259         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8260         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8261
8262         val = min(nic_rep_thresh, host_rep_thresh);
8263         tw32(RCVBDI_STD_THRESH, val);
8264
8265         if (tg3_flag(tp, 57765_PLUS))
8266                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8267
8268         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8269                 return;
8270
8271         if (!tg3_flag(tp, 5705_PLUS))
8272                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8273         else
8274                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8275
8276         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8277
8278         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8279         tw32(RCVBDI_JUMBO_THRESH, val);
8280
8281         if (tg3_flag(tp, 57765_PLUS))
8282                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8283 }
8284
8285 /* tp->lock is held. */
8286 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8287 {
8288         u32 val, rdmac_mode;
8289         int i, err, limit;
8290         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8291
8292         tg3_disable_ints(tp);
8293
8294         tg3_stop_fw(tp);
8295
8296         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8297
8298         if (tg3_flag(tp, INIT_COMPLETE))
8299                 tg3_abort_hw(tp, 1);
8300
8301         /* Enable MAC control of LPI */
8302         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8303                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8304                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8305                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8306
8307                 tw32_f(TG3_CPMU_EEE_CTRL,
8308                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8309
8310                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8311                       TG3_CPMU_EEEMD_LPI_IN_TX |
8312                       TG3_CPMU_EEEMD_LPI_IN_RX |
8313                       TG3_CPMU_EEEMD_EEE_ENABLE;
8314
8315                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8316                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8317
8318                 if (tg3_flag(tp, ENABLE_APE))
8319                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8320
8321                 tw32_f(TG3_CPMU_EEE_MODE, val);
8322
8323                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8324                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8325                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8326
8327                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8328                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8329                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8330         }
8331
8332         if (reset_phy)
8333                 tg3_phy_reset(tp);
8334
8335         err = tg3_chip_reset(tp);
8336         if (err)
8337                 return err;
8338
8339         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8340
8341         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8342                 val = tr32(TG3_CPMU_CTRL);
8343                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8344                 tw32(TG3_CPMU_CTRL, val);
8345
8346                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8347                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8348                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8349                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8350
8351                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8352                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8353                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8354                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8355
8356                 val = tr32(TG3_CPMU_HST_ACC);
8357                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8358                 val |= CPMU_HST_ACC_MACCLK_6_25;
8359                 tw32(TG3_CPMU_HST_ACC, val);
8360         }
8361
8362         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8363                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8364                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8365                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8366                 tw32(PCIE_PWR_MGMT_THRESH, val);
8367
8368                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8369                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8370
8371                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8372
8373                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8374                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8375         }
8376
8377         if (tg3_flag(tp, L1PLLPD_EN)) {
8378                 u32 grc_mode = tr32(GRC_MODE);
8379
8380                 /* Access the lower 1K of PL PCIE block registers. */
8381                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8382                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8383
8384                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8385                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8386                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8387
8388                 tw32(GRC_MODE, grc_mode);
8389         }
8390
8391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8392                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8393                         u32 grc_mode = tr32(GRC_MODE);
8394
8395                         /* Access the lower 1K of PL PCIE block registers. */
8396                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8397                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8398
8399                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8400                                    TG3_PCIE_PL_LO_PHYCTL5);
8401                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8402                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8403
8404                         tw32(GRC_MODE, grc_mode);
8405                 }
8406
8407                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8408                         u32 grc_mode = tr32(GRC_MODE);
8409
8410                         /* Access the lower 1K of DL PCIE block registers. */
8411                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8412                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8413
8414                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8415                                    TG3_PCIE_DL_LO_FTSMAX);
8416                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8417                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8418                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8419
8420                         tw32(GRC_MODE, grc_mode);
8421                 }
8422
8423                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8424                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8425                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8426                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8427         }
8428
8429         /* This works around an issue with Athlon chipsets on
8430          * B3 tigon3 silicon.  This bit has no effect on any
8431          * other revision.  But do not set this on PCI Express
8432          * chips and don't even touch the clocks if the CPMU is present.
8433          */
8434         if (!tg3_flag(tp, CPMU_PRESENT)) {
8435                 if (!tg3_flag(tp, PCI_EXPRESS))
8436                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8437                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8438         }
8439
8440         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8441             tg3_flag(tp, PCIX_MODE)) {
8442                 val = tr32(TG3PCI_PCISTATE);
8443                 val |= PCISTATE_RETRY_SAME_DMA;
8444                 tw32(TG3PCI_PCISTATE, val);
8445         }
8446
8447         if (tg3_flag(tp, ENABLE_APE)) {
8448                 /* Allow reads and writes to the
8449                  * APE register and memory space.
8450                  */
8451                 val = tr32(TG3PCI_PCISTATE);
8452                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8453                        PCISTATE_ALLOW_APE_SHMEM_WR |
8454                        PCISTATE_ALLOW_APE_PSPACE_WR;
8455                 tw32(TG3PCI_PCISTATE, val);
8456         }
8457
8458         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8459                 /* Enable some hw fixes.  */
8460                 val = tr32(TG3PCI_MSI_DATA);
8461                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8462                 tw32(TG3PCI_MSI_DATA, val);
8463         }
8464
8465         /* Descriptor ring init may make accesses to the
8466          * NIC SRAM area to setup the TX descriptors, so we
8467          * can only do this after the hardware has been
8468          * successfully reset.
8469          */
8470         err = tg3_init_rings(tp);
8471         if (err)
8472                 return err;
8473
8474         if (tg3_flag(tp, 57765_PLUS)) {
8475                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8476                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8477                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8478                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8479                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8480                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8481                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8482                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8483         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8484                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8485                 /* This value is determined during the probe time DMA
8486                  * engine test, tg3_test_dma.
8487                  */
8488                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8489         }
8490
8491         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8492                           GRC_MODE_4X_NIC_SEND_RINGS |
8493                           GRC_MODE_NO_TX_PHDR_CSUM |
8494                           GRC_MODE_NO_RX_PHDR_CSUM);
8495         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8496
8497         /* Pseudo-header checksum is done by hardware logic and not
8498          * the offload processers, so make the chip do the pseudo-
8499          * header checksums on receive.  For transmit it is more
8500          * convenient to do the pseudo-header checksum in software
8501          * as Linux does that on transmit for us in all cases.
8502          */
8503         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8504
8505         tw32(GRC_MODE,
8506              tp->grc_mode |
8507              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8508
8509         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8510         val = tr32(GRC_MISC_CFG);
8511         val &= ~0xff;
8512         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8513         tw32(GRC_MISC_CFG, val);
8514
8515         /* Initialize MBUF/DESC pool. */
8516         if (tg3_flag(tp, 5750_PLUS)) {
8517                 /* Do nothing.  */
8518         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8519                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8520                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8521                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8522                 else
8523                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8524                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8525                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8526         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8527                 int fw_len;
8528
8529                 fw_len = tp->fw_len;
8530                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8531                 tw32(BUFMGR_MB_POOL_ADDR,
8532                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8533                 tw32(BUFMGR_MB_POOL_SIZE,
8534                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8535         }
8536
8537         if (tp->dev->mtu <= ETH_DATA_LEN) {
8538                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8539                      tp->bufmgr_config.mbuf_read_dma_low_water);
8540                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8541                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8542                 tw32(BUFMGR_MB_HIGH_WATER,
8543                      tp->bufmgr_config.mbuf_high_water);
8544         } else {
8545                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8546                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8547                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8548                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8549                 tw32(BUFMGR_MB_HIGH_WATER,
8550                      tp->bufmgr_config.mbuf_high_water_jumbo);
8551         }
8552         tw32(BUFMGR_DMA_LOW_WATER,
8553              tp->bufmgr_config.dma_low_water);
8554         tw32(BUFMGR_DMA_HIGH_WATER,
8555              tp->bufmgr_config.dma_high_water);
8556
8557         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8559                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8561             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8562             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8563                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8564         tw32(BUFMGR_MODE, val);
8565         for (i = 0; i < 2000; i++) {
8566                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8567                         break;
8568                 udelay(10);
8569         }
8570         if (i >= 2000) {
8571                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8572                 return -ENODEV;
8573         }
8574
8575         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8576                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8577
8578         tg3_setup_rxbd_thresholds(tp);
8579
8580         /* Initialize TG3_BDINFO's at:
8581          *  RCVDBDI_STD_BD:     standard eth size rx ring
8582          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8583          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8584          *
8585          * like so:
8586          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8587          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8588          *                              ring attribute flags
8589          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8590          *
8591          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8592          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8593          *
8594          * The size of each ring is fixed in the firmware, but the location is
8595          * configurable.
8596          */
8597         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8598              ((u64) tpr->rx_std_mapping >> 32));
8599         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8600              ((u64) tpr->rx_std_mapping & 0xffffffff));
8601         if (!tg3_flag(tp, 5717_PLUS))
8602                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8603                      NIC_SRAM_RX_BUFFER_DESC);
8604
8605         /* Disable the mini ring */
8606         if (!tg3_flag(tp, 5705_PLUS))
8607                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8608                      BDINFO_FLAGS_DISABLED);
8609
8610         /* Program the jumbo buffer descriptor ring control
8611          * blocks on those devices that have them.
8612          */
8613         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8614             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8615
8616                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8617                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8618                              ((u64) tpr->rx_jmb_mapping >> 32));
8619                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8620                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8621                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8622                               BDINFO_FLAGS_MAXLEN_SHIFT;
8623                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8624                              val | BDINFO_FLAGS_USE_EXT_RECV);
8625                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8626                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8627                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8628                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8629                 } else {
8630                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8631                              BDINFO_FLAGS_DISABLED);
8632                 }
8633
8634                 if (tg3_flag(tp, 57765_PLUS)) {
8635                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8636                                 val = TG3_RX_STD_MAX_SIZE_5700;
8637                         else
8638                                 val = TG3_RX_STD_MAX_SIZE_5717;
8639                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8640                         val |= (TG3_RX_STD_DMA_SZ << 2);
8641                 } else
8642                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8643         } else
8644                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8645
8646         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8647
8648         tpr->rx_std_prod_idx = tp->rx_pending;
8649         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8650
8651         tpr->rx_jmb_prod_idx =
8652                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8653         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8654
8655         tg3_rings_reset(tp);
8656
8657         /* Initialize MAC address and backoff seed. */
8658         __tg3_set_mac_addr(tp, 0);
8659
8660         /* MTU + ethernet header + FCS + optional VLAN tag */
8661         tw32(MAC_RX_MTU_SIZE,
8662              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8663
8664         /* The slot time is changed by tg3_setup_phy if we
8665          * run at gigabit with half duplex.
8666          */
8667         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8668               (6 << TX_LENGTHS_IPG_SHIFT) |
8669               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8670
8671         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8672                 val |= tr32(MAC_TX_LENGTHS) &
8673                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8674                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8675
8676         tw32(MAC_TX_LENGTHS, val);
8677
8678         /* Receive rules. */
8679         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8680         tw32(RCVLPC_CONFIG, 0x0181);
8681
8682         /* Calculate RDMAC_MODE setting early, we need it to determine
8683          * the RCVLPC_STATE_ENABLE mask.
8684          */
8685         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8686                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8687                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8688                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8689                       RDMAC_MODE_LNGREAD_ENAB);
8690
8691         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8692                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8693
8694         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8695             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8696             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8697                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8698                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8699                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8700
8701         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8702             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8703                 if (tg3_flag(tp, TSO_CAPABLE) &&
8704                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8705                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8706                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8707                            !tg3_flag(tp, IS_5788)) {
8708                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8709                 }
8710         }
8711
8712         if (tg3_flag(tp, PCI_EXPRESS))
8713                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8714
8715         if (tg3_flag(tp, HW_TSO_1) ||
8716             tg3_flag(tp, HW_TSO_2) ||
8717             tg3_flag(tp, HW_TSO_3))
8718                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8719
8720         if (tg3_flag(tp, 57765_PLUS) ||
8721             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8722             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8723                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8724
8725         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8726                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8727
8728         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8729             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8730             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8731             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8732             tg3_flag(tp, 57765_PLUS)) {
8733                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8734                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8735                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8736                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8737                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8738                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8739                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8740                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8741                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8742                 }
8743                 tw32(TG3_RDMA_RSRVCTRL_REG,
8744                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8745         }
8746
8747         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8748             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8749                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8750                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8751                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8752                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8753         }
8754
8755         /* Receive/send statistics. */
8756         if (tg3_flag(tp, 5750_PLUS)) {
8757                 val = tr32(RCVLPC_STATS_ENABLE);
8758                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8759                 tw32(RCVLPC_STATS_ENABLE, val);
8760         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8761                    tg3_flag(tp, TSO_CAPABLE)) {
8762                 val = tr32(RCVLPC_STATS_ENABLE);
8763                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8764                 tw32(RCVLPC_STATS_ENABLE, val);
8765         } else {
8766                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8767         }
8768         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8769         tw32(SNDDATAI_STATSENAB, 0xffffff);
8770         tw32(SNDDATAI_STATSCTRL,
8771              (SNDDATAI_SCTRL_ENABLE |
8772               SNDDATAI_SCTRL_FASTUPD));
8773
8774         /* Setup host coalescing engine. */
8775         tw32(HOSTCC_MODE, 0);
8776         for (i = 0; i < 2000; i++) {
8777                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8778                         break;
8779                 udelay(10);
8780         }
8781
8782         __tg3_set_coalesce(tp, &tp->coal);
8783
8784         if (!tg3_flag(tp, 5705_PLUS)) {
8785                 /* Status/statistics block address.  See tg3_timer,
8786                  * the tg3_periodic_fetch_stats call there, and
8787                  * tg3_get_stats to see how this works for 5705/5750 chips.
8788                  */
8789                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8790                      ((u64) tp->stats_mapping >> 32));
8791                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8792                      ((u64) tp->stats_mapping & 0xffffffff));
8793                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8794
8795                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8796
8797                 /* Clear statistics and status block memory areas */
8798                 for (i = NIC_SRAM_STATS_BLK;
8799                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8800                      i += sizeof(u32)) {
8801                         tg3_write_mem(tp, i, 0);
8802                         udelay(40);
8803                 }
8804         }
8805
8806         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8807
8808         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8809         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8810         if (!tg3_flag(tp, 5705_PLUS))
8811                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8812
8813         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8814                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8815                 /* reset to prevent losing 1st rx packet intermittently */
8816                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8817                 udelay(10);
8818         }
8819
8820         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8821                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8822                         MAC_MODE_FHDE_ENABLE;
8823         if (tg3_flag(tp, ENABLE_APE))
8824                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8825         if (!tg3_flag(tp, 5705_PLUS) &&
8826             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8827             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8828                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8829         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8830         udelay(40);
8831
8832         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8833          * If TG3_FLAG_IS_NIC is zero, we should read the
8834          * register to preserve the GPIO settings for LOMs. The GPIOs,
8835          * whether used as inputs or outputs, are set by boot code after
8836          * reset.
8837          */
8838         if (!tg3_flag(tp, IS_NIC)) {
8839                 u32 gpio_mask;
8840
8841                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8842                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8843                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8844
8845                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8846                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8847                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8848
8849                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8850                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8851
8852                 tp->grc_local_ctrl &= ~gpio_mask;
8853                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8854
8855                 /* GPIO1 must be driven high for eeprom write protect */
8856                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8857                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8858                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8859         }
8860         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8861         udelay(100);
8862
8863         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8864                 val = tr32(MSGINT_MODE);
8865                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8866                 if (!tg3_flag(tp, 1SHOT_MSI))
8867                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8868                 tw32(MSGINT_MODE, val);
8869         }
8870
8871         if (!tg3_flag(tp, 5705_PLUS)) {
8872                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8873                 udelay(40);
8874         }
8875
8876         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8877                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8878                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8879                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8880                WDMAC_MODE_LNGREAD_ENAB);
8881
8882         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8883             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8884                 if (tg3_flag(tp, TSO_CAPABLE) &&
8885                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8886                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8887                         /* nothing */
8888                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8889                            !tg3_flag(tp, IS_5788)) {
8890                         val |= WDMAC_MODE_RX_ACCEL;
8891                 }
8892         }
8893
8894         /* Enable host coalescing bug fix */
8895         if (tg3_flag(tp, 5755_PLUS))
8896                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8897
8898         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8899                 val |= WDMAC_MODE_BURST_ALL_DATA;
8900
8901         tw32_f(WDMAC_MODE, val);
8902         udelay(40);
8903
8904         if (tg3_flag(tp, PCIX_MODE)) {
8905                 u16 pcix_cmd;
8906
8907                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8908                                      &pcix_cmd);
8909                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8910                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8911                         pcix_cmd |= PCI_X_CMD_READ_2K;
8912                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8913                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8914                         pcix_cmd |= PCI_X_CMD_READ_2K;
8915                 }
8916                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8917                                       pcix_cmd);
8918         }
8919
8920         tw32_f(RDMAC_MODE, rdmac_mode);
8921         udelay(40);
8922
8923         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8924         if (!tg3_flag(tp, 5705_PLUS))
8925                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8926
8927         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8928                 tw32(SNDDATAC_MODE,
8929                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8930         else
8931                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8932
8933         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8934         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8935         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8936         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8937                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8938         tw32(RCVDBDI_MODE, val);
8939         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8940         if (tg3_flag(tp, HW_TSO_1) ||
8941             tg3_flag(tp, HW_TSO_2) ||
8942             tg3_flag(tp, HW_TSO_3))
8943                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8944         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8945         if (tg3_flag(tp, ENABLE_TSS))
8946                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8947         tw32(SNDBDI_MODE, val);
8948         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8949
8950         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8951                 err = tg3_load_5701_a0_firmware_fix(tp);
8952                 if (err)
8953                         return err;
8954         }
8955
8956         if (tg3_flag(tp, TSO_CAPABLE)) {
8957                 err = tg3_load_tso_firmware(tp);
8958                 if (err)
8959                         return err;
8960         }
8961
8962         tp->tx_mode = TX_MODE_ENABLE;
8963
8964         if (tg3_flag(tp, 5755_PLUS) ||
8965             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8966                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8967
8968         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8969                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8970                 tp->tx_mode &= ~val;
8971                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8972         }
8973
8974         tw32_f(MAC_TX_MODE, tp->tx_mode);
8975         udelay(100);
8976
8977         if (tg3_flag(tp, ENABLE_RSS)) {
8978                 int i = 0;
8979                 u32 reg = MAC_RSS_INDIR_TBL_0;
8980
8981                 if (tp->irq_cnt == 2) {
8982                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8983                                 tw32(reg, 0x0);
8984                                 reg += 4;
8985                         }
8986                 } else {
8987                         u32 val;
8988
8989                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8990                                 val = i % (tp->irq_cnt - 1);
8991                                 i++;
8992                                 for (; i % 8; i++) {
8993                                         val <<= 4;
8994                                         val |= (i % (tp->irq_cnt - 1));
8995                                 }
8996                                 tw32(reg, val);
8997                                 reg += 4;
8998                         }
8999                 }
9000
9001                 /* Setup the "secret" hash key. */
9002                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9003                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9004                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9005                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9006                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9007                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9008                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9009                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9010                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9011                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9012         }
9013
9014         tp->rx_mode = RX_MODE_ENABLE;
9015         if (tg3_flag(tp, 5755_PLUS))
9016                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9017
9018         if (tg3_flag(tp, ENABLE_RSS))
9019                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9020                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9021                                RX_MODE_RSS_IPV6_HASH_EN |
9022                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9023                                RX_MODE_RSS_IPV4_HASH_EN |
9024                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9025
9026         tw32_f(MAC_RX_MODE, tp->rx_mode);
9027         udelay(10);
9028
9029         tw32(MAC_LED_CTRL, tp->led_ctrl);
9030
9031         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9032         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9033                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9034                 udelay(10);
9035         }
9036         tw32_f(MAC_RX_MODE, tp->rx_mode);
9037         udelay(10);
9038
9039         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9040                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9041                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9042                         /* Set drive transmission level to 1.2V  */
9043                         /* only if the signal pre-emphasis bit is not set  */
9044                         val = tr32(MAC_SERDES_CFG);
9045                         val &= 0xfffff000;
9046                         val |= 0x880;
9047                         tw32(MAC_SERDES_CFG, val);
9048                 }
9049                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9050                         tw32(MAC_SERDES_CFG, 0x616000);
9051         }
9052
9053         /* Prevent chip from dropping frames when flow control
9054          * is enabled.
9055          */
9056         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9057                 val = 1;
9058         else
9059                 val = 2;
9060         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9061
9062         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9063             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9064                 /* Use hardware link auto-negotiation */
9065                 tg3_flag_set(tp, HW_AUTONEG);
9066         }
9067
9068         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9069             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9070                 u32 tmp;
9071
9072                 tmp = tr32(SERDES_RX_CTRL);
9073                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9074                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9075                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9076                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9077         }
9078
9079         if (!tg3_flag(tp, USE_PHYLIB)) {
9080                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9081                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9082                         tp->link_config.speed = tp->link_config.orig_speed;
9083                         tp->link_config.duplex = tp->link_config.orig_duplex;
9084                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9085                 }
9086
9087                 err = tg3_setup_phy(tp, 0);
9088                 if (err)
9089                         return err;
9090
9091                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9092                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9093                         u32 tmp;
9094
9095                         /* Clear CRC stats. */
9096                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9097                                 tg3_writephy(tp, MII_TG3_TEST1,
9098                                              tmp | MII_TG3_TEST1_CRC_EN);
9099                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9100                         }
9101                 }
9102         }
9103
9104         __tg3_set_rx_mode(tp->dev);
9105
9106         /* Initialize receive rules. */
9107         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9108         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9109         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9110         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9111
9112         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9113                 limit = 8;
9114         else
9115                 limit = 16;
9116         if (tg3_flag(tp, ENABLE_ASF))
9117                 limit -= 4;
9118         switch (limit) {
9119         case 16:
9120                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9121         case 15:
9122                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9123         case 14:
9124                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9125         case 13:
9126                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9127         case 12:
9128                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9129         case 11:
9130                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9131         case 10:
9132                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9133         case 9:
9134                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9135         case 8:
9136                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9137         case 7:
9138                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9139         case 6:
9140                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9141         case 5:
9142                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9143         case 4:
9144                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9145         case 3:
9146                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9147         case 2:
9148         case 1:
9149
9150         default:
9151                 break;
9152         }
9153
9154         if (tg3_flag(tp, ENABLE_APE))
9155                 /* Write our heartbeat update interval to APE. */
9156                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9157                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9158
9159         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9160
9161         return 0;
9162 }
9163
9164 /* Called at device open time to get the chip ready for
9165  * packet processing.  Invoked with tp->lock held.
9166  */
9167 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9168 {
9169         tg3_switch_clocks(tp);
9170
9171         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9172
9173         return tg3_reset_hw(tp, reset_phy);
9174 }
9175
/* Accumulate a 32-bit hardware counter register REG into the 64-bit
 * software statistic PSTAT (a low/high pair).  The unsigned wrap test
 * (new low < value just added) detects carry out of the low word and
 * propagates it into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9182
/* Sample the chip's 32-bit MAC TX/RX and RCVLPC statistics registers and
 * fold them into the 64-bit counters in tp->hw_stats via TG3_STAT_ADD32.
 * Called from the driver timer path (see tg3_timer) so the 32-bit
 * hardware registers are read before they can wrap.
 * NOTE(review): register read order is preserved deliberately; some of
 * these counters may be clear-on-read — confirm before reordering.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Skip sampling entirely while the link is down. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* On 5717 (all revs), 5719 A0 and 5720 A0, rx_discards is derived
	 * from the mbuf low-watermark attention bit rather than the
	 * RCVLPC discard counter.  NOTE(review): rationale inferred from
	 * the chip-rev test below — presumably the counter is unusable on
	 * those revisions; confirm against the errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		/* Reduce the attention word to 0/1: was the mbuf
		 * low-watermark bit set?
		 */
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			/* Acknowledge the attention, then bump the 64-bit
			 * discard counter by one with carry into the high
			 * word on wrap.
			 */
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		/* Mirror the derived value so both stats report it. */
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9237
9238 static void tg3_chk_missed_msi(struct tg3 *tp)
9239 {
9240         u32 i;
9241
9242         for (i = 0; i < tp->irq_cnt; i++) {
9243                 struct tg3_napi *tnapi = &tp->napi[i];
9244
9245                 if (tg3_has_work(tnapi)) {
9246                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9247                             tnapi->last_tx_cons == tnapi->tx_cons) {
9248                                 if (tnapi->chk_msi_cnt < 1) {
9249                                         tnapi->chk_msi_cnt++;
9250                                         return;
9251                                 }
9252                                 tg3_msi(0, tnapi);
9253                         }
9254                 }
9255                 tnapi->chk_msi_cnt = 0;
9256                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9257                 tnapi->last_tx_cons = tnapi->tx_cons;
9258         }
9259 }
9260
9261 static void tg3_timer(unsigned long __opaque)
9262 {
9263         struct tg3 *tp = (struct tg3 *) __opaque;
9264
9265         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9266                 goto restart_timer;
9267
9268         spin_lock(&tp->lock);
9269
9270         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9271             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9272                 tg3_chk_missed_msi(tp);
9273
9274         if (!tg3_flag(tp, TAGGED_STATUS)) {
9275                 /* All of this garbage is because when using non-tagged
9276                  * IRQ status the mailbox/status_block protocol the chip
9277                  * uses with the cpu is race prone.
9278                  */
9279                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9280                         tw32(GRC_LOCAL_CTRL,
9281                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9282                 } else {
9283                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9284                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9285                 }
9286
9287                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9288                         spin_unlock(&tp->lock);
9289                         tg3_reset_task_schedule(tp);
9290                         goto restart_timer;
9291                 }
9292         }
9293
9294         /* This part only runs once per second. */
9295         if (!--tp->timer_counter) {
9296                 if (tg3_flag(tp, 5705_PLUS))
9297                         tg3_periodic_fetch_stats(tp);
9298
9299                 if (tp->setlpicnt && !--tp->setlpicnt)
9300                         tg3_phy_eee_enable(tp);
9301
9302                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9303                         u32 mac_stat;
9304                         int phy_event;
9305
9306                         mac_stat = tr32(MAC_STATUS);
9307
9308                         phy_event = 0;
9309                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9310                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9311                                         phy_event = 1;
9312                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9313                                 phy_event = 1;
9314
9315                         if (phy_event)
9316                                 tg3_setup_phy(tp, 0);
9317                 } else if (tg3_flag(tp, POLL_SERDES)) {
9318                         u32 mac_stat = tr32(MAC_STATUS);
9319                         int need_setup = 0;
9320
9321                         if (netif_carrier_ok(tp->dev) &&
9322                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9323                                 need_setup = 1;
9324                         }
9325                         if (!netif_carrier_ok(tp->dev) &&
9326                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9327                                          MAC_STATUS_SIGNAL_DET))) {
9328                                 need_setup = 1;
9329                         }
9330                         if (need_setup) {
9331                                 if (!tp->serdes_counter) {
9332                                         tw32_f(MAC_MODE,
9333                                              (tp->mac_mode &
9334                                               ~MAC_MODE_PORT_MODE_MASK));
9335                                         udelay(40);
9336                                         tw32_f(MAC_MODE, tp->mac_mode);
9337                                         udelay(40);
9338                                 }
9339                                 tg3_setup_phy(tp, 0);
9340                         }
9341                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9342                            tg3_flag(tp, 5780_CLASS)) {
9343                         tg3_serdes_parallel_detect(tp);
9344                 }
9345
9346                 tp->timer_counter = tp->timer_multiplier;
9347         }
9348
9349         /* Heartbeat is only sent once every 2 seconds.
9350          *
9351          * The heartbeat is to tell the ASF firmware that the host
9352          * driver is still alive.  In the event that the OS crashes,
9353          * ASF needs to reset the hardware to free up the FIFO space
9354          * that may be filled with rx packets destined for the host.
9355          * If the FIFO is full, ASF will no longer function properly.
9356          *
9357          * Unintended resets have been reported on real time kernels
9358          * where the timer doesn't run on time.  Netpoll will also have
9359          * same problem.
9360          *
9361          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9362          * to check the ring condition when the heartbeat is expiring
9363          * before doing the reset.  This will prevent most unintended
9364          * resets.
9365          */
9366         if (!--tp->asf_counter) {
9367                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9368                         tg3_wait_for_event_ack(tp);
9369
9370                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9371                                       FWCMD_NICDRV_ALIVE3);
9372                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9373                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9374                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9375
9376                         tg3_generate_fw_event(tp);
9377                 }
9378                 tp->asf_counter = tp->asf_multiplier;
9379         }
9380
9381         spin_unlock(&tp->lock);
9382
9383 restart_timer:
9384         tp->timer.expires = jiffies + tp->timer_offset;
9385         add_timer(&tp->timer);
9386 }
9387
9388 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9389 {
9390         irq_handler_t fn;
9391         unsigned long flags;
9392         char *name;
9393         struct tg3_napi *tnapi = &tp->napi[irq_num];
9394
9395         if (tp->irq_cnt == 1)
9396                 name = tp->dev->name;
9397         else {
9398                 name = &tnapi->irq_lbl[0];
9399                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9400                 name[IFNAMSIZ-1] = 0;
9401         }
9402
9403         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9404                 fn = tg3_msi;
9405                 if (tg3_flag(tp, 1SHOT_MSI))
9406                         fn = tg3_msi_1shot;
9407                 flags = 0;
9408         } else {
9409                 fn = tg3_interrupt;
9410                 if (tg3_flag(tp, TAGGED_STATUS))
9411                         fn = tg3_interrupt_tagged;
9412                 flags = IRQF_SHARED;
9413         }
9414
9415         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9416 }
9417
/* Verify that the device can actually deliver an interrupt.
 *
 * Temporarily swaps in tg3_test_isr on vector 0, forces the chip to
 * raise an interrupt via HOSTCC_MODE_NOW, and polls for evidence of
 * delivery (a non-zero interrupt mailbox, or PCI interrupts masked by
 * the ISR).  The normal handler is reinstalled before returning.
 * Used by tg3_test_msi() to decide whether MSI works on this platform.
 *
 * Returns 0 if an interrupt was seen, -EIO if not, -ENODEV if the
 * device is not running, or a negative errno from request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so it fires an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either sign means the test ISR ran. */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* On 57765+ chips, acknowledge any status update so a
		 * further interrupt can be generated while we poll.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Reinstall the normal handler for vector 0. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
9491
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * Any other failure (including a failure to restore INTx) is returned
 * as a negative errno.  Only meaningful when USING_MSI is set; a no-op
 * otherwise.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word, SERR included. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	/* Re-request vector 0 as a legacy (possibly shared) interrupt. */
	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
9552
9553 static int tg3_request_firmware(struct tg3 *tp)
9554 {
9555         const __be32 *fw_data;
9556
9557         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9558                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9559                            tp->fw_needed);
9560                 return -ENOENT;
9561         }
9562
9563         fw_data = (void *)tp->fw->data;
9564
9565         /* Firmware blob starts with version numbers, followed by
9566          * start address and _full_ length including BSS sections
9567          * (which must be longer than the actual data, of course
9568          */
9569
9570         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9571         if (tp->fw_len < (tp->fw->size - 12)) {
9572                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9573                            tp->fw_len, tp->fw_needed);
9574                 release_firmware(tp->fw);
9575                 tp->fw = NULL;
9576                 return -EINVAL;
9577         }
9578
9579         /* We no longer need firmware; we have it. */
9580         tp->fw_needed = NULL;
9581         return 0;
9582 }
9583
/* Try to put the device into MSI-X mode with roughly one RX ring per
 * online CPU.
 *
 * Vector 0 only handles link/setup interrupts, so one vector beyond
 * the CPU count is requested, capped at tp->irq_max.  If the PCI core
 * grants fewer vectors than asked for, the request is retried with
 * the granted count.  RSS (and on 5719/5720 also TSS) is enabled when
 * more than one vector was obtained.
 *
 * Returns true if MSI-X is now enabled; false means the caller should
 * fall back to plain MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* A positive return is the number of vectors actually
		 * available; retry with that reduced count.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	/* All vectors beyond vector 0 carry RX traffic. */
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
9638
/* Choose the interrupt mode for this device -- MSI-X, then MSI, then
 * legacy INTx -- based on what the chip supports.  Sets tp->irq_cnt,
 * fills in the per-vector napi irq numbers, and programs MSGINT_MODE
 * for MSI operation.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* Without one-shot MSI support, interrupts must be
		 * explicitly re-enabled after each one fires.
		 */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI and INTx both run a single vector and queue. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
9672
9673 static void tg3_ints_fini(struct tg3 *tp)
9674 {
9675         if (tg3_flag(tp, USING_MSIX))
9676                 pci_disable_msix(tp->pdev);
9677         else if (tg3_flag(tp, USING_MSI))
9678                 pci_disable_msi(tp->pdev);
9679         tg3_flag_clear(tp, USING_MSI);
9680         tg3_flag_clear(tp, USING_MSIX);
9681         tg3_flag_clear(tp, ENABLE_RSS);
9682         tg3_flag_clear(tp, ENABLE_TSS);
9683 }
9684
/* ndo_open hook: bring the interface up.
 *
 * Ordering matters: firmware is fetched (required on 5701 A0, optional
 * TSO support elsewhere), the chip is powered up, interrupt vectors
 * are chosen (which determines how many NAPI contexts to set up), ring
 * memory is allocated, IRQs are requested, and only then is the
 * hardware initialized and the periodic timer armed.  MSI delivery is
 * verified via tg3_test_msi() before being relied on.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run without its firmware. */
			if (err)
				return err;
		} else if (err) {
			/* Other chips only lose TSO when firmware is
			 * missing; continue without it.
			 */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Unwind the vectors requested so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged-status chips only need the timer once per
		 * second; others (and 5717/57765) run it at 10 Hz.
		 */
		if (tg3_flag(tp, TAGGED_STATUS) &&
			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		/* timer_counter expires once per second; the ASF
		 * heartbeat counter every two seconds.
		 */
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	/* Open failed: power the device back down. */
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);
	return err;
}
9835
9836 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9837                                                  struct rtnl_link_stats64 *);
9838 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9839
/* ndo_stop hook: take the interface down.
 *
 * Stops NAPI and the periodic timer, halts the chip under the full
 * lock, releases IRQs and interrupt vectors, snapshots the hardware
 * statistics into the *_prev copies (so counters appear continuous
 * across a down/up cycle -- the hw stats block is freed below), then
 * frees ring memory and powers the device down.
 */
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Preserve the cumulative totals while the device is down. */
	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
9886
9887 static inline u64 get_stat64(tg3_stat64_t *val)
9888 {
9889        return ((u64)val->high << 32) | ((u64)val->low);
9890 }
9891
/* Return the cumulative RX CRC error count.
 *
 * Most chips report this through the MAC's rx_fcs_errors hardware
 * statistic.  On 5700/5701 with a copper PHY the count is instead
 * read from the PHY's error counter register (with CRC counting
 * enabled on the fly via MII_TG3_TEST1_CRC_EN) and accumulated in
 * tp->phy_crc_errors.  NOTE(review): the += accumulation implies the
 * PHY counter returns a delta (clears on read) -- confirm against
 * the PHY datasheet.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* PHY access must be serialized against the timer etc. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
9917
/* Accumulate one live hardware statistic on top of the snapshot taken
 * at the last tg3_close(), so the ethtool counters appear continuous
 * across down/up cycles.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Build the full ethtool statistics block in tp->estats from the live
 * hardware statistics plus the totals preserved at close time.  When
 * the hw stats block is not mapped (device down) the preserved
 * snapshot is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}
10011
/* ndo_get_stats64 hook: fill @stats with cumulative netdev counters --
 * live hardware statistics added on top of the totals saved at the
 * last tg3_close().  Returns the saved totals unchanged when the hw
 * stats block is not mapped (device down).
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY rather than the MAC. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-counted drops (not part of the hw stats block). */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;

	return stats;
}
10074
/* calc_crc - CRC-32 used to index the multicast hash filter
 * @buf: bytes to checksum (read-only)
 * @len: number of bytes in @buf
 *
 * Bit-at-a-time CRC-32 (IEEE 802.3): reflected polynomial 0xedb88320,
 * initial value 0xffffffff, final one's complement.  For the standard
 * check string "123456789" this yields 0xcbf43926.
 *
 * @buf is const-qualified since it is never written.  uint32_t is the
 * same type as u32 in the kernel (linux/types.h), spelled out here so
 * the helper is self-contained.  The bit-serial loop is plenty fast
 * for the 6-byte MAC addresses it is applied to.
 */
static inline uint32_t calc_crc(const unsigned char *buf, int len)
{
	uint32_t reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			/* Low bit set: shift and fold in the polynomial. */
			if (reg & 1)
				reg = (reg >> 1) ^ 0xedb88320;
			else
				reg >>= 1;
		}
	}

	return ~reg;
}
10098
10099 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10100 {
10101         /* accept or reject all multicast frames */
10102         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10103         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10104         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10105         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10106 }
10107
/* Program the MAC receive filters from dev->flags and the device's
 * multicast list.  Runs under tg3_full_lock() (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address into one of the 128 filter bits:
		 * the low 7 bits of the inverted CRC pick the bit, of
		 * which bits 5-6 select the register and bits 0-4 the
		 * bit position within it.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register if something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
10161
/* ndo_set_rx_mode hook: apply the current promiscuous/multicast
 * settings under the full driver lock.  Nothing to do while the
 * interface is down; the filters are reprogrammed on open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
10173
/* ethtool get_regs_len hook: the register dump has a fixed size. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
10178
/* ethtool get_regs hook: copy the legacy register block into @_p.
 * The buffer is zeroed first; while the chip is in low-power state
 * no registers are read and the dump is left all-zero.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
10197
/* ethtool get_eeprom_len: report the NVRAM size probed at init time. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
10204
/* ethtool get_eeprom: read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is word-addressable, so unaligned
 * head/tail bytes are handled by reading the enclosing 32-bit word and
 * copying out only the relevant bytes.  eeprom->len is updated to the
 * number of bytes actually transferred, including on partial failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	/* NVRAM is not accessible while in low-power state. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how many bytes made it before failing. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
10267
10268 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10269
/* ethtool set_eeprom: write eeprom->len bytes to NVRAM at eeprom->offset.
 * NVRAM writes are word-based, so a misaligned start or end is widened
 * to the enclosing 4-byte boundary via read-modify-write: the bordering
 * words are fetched first and merged with the user data in a bounce
 * buffer before the block write.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* Seed the bounce buffer's border words, then overlay
		 * the caller's data at its unaligned position.
		 */
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
10328
/* ethtool get_settings: report current link parameters.
 * With phylib, the attached PHY device is authoritative; otherwise the
 * settings are synthesized from the driver's link_config state and the
 * PHY capability flags.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHYs support the 10/100 modes; SERDES links are fibre. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Translate the FLOW_CTRL_{RX,TX} pair into the
		 * corresponding Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
	} else {
		/* No meaningful link state while the interface is down. */
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
10386
/* ethtool set_settings: apply new link parameters.
 * With phylib the request is handed to the PHY layer.  Otherwise the
 * request is validated against the hardware's capabilities (10/100-only
 * PHYs, SERDES restricted to forced 1000/full) before link_config is
 * updated and, if the interface is up, the PHY is reconfigured.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the set of advertisement bits this hardware
		 * can actually honor.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* Reject any advertised mode the hardware can't do. */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits; Autoneg is re-added
		 * below and port/pause bits are handled separately.
		 */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SERDES links support only forced 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested configuration so it can be restored
	 * after power-state transitions.
	 */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
10477
10478 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10479 {
10480         struct tg3 *tp = netdev_priv(dev);
10481
10482         strcpy(info->driver, DRV_MODULE_NAME);
10483         strcpy(info->version, DRV_MODULE_VERSION);
10484         strcpy(info->fw_version, tp->fw_ver);
10485         strcpy(info->bus_info, pci_name(tp->pdev));
10486 }
10487
10488 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10489 {
10490         struct tg3 *tp = netdev_priv(dev);
10491
10492         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10493                 wol->supported = WAKE_MAGIC;
10494         else
10495                 wol->supported = 0;
10496         wol->wolopts = 0;
10497         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10498                 wol->wolopts = WAKE_MAGIC;
10499         memset(&wol->sopass, 0, sizeof(wol->sopass));
10500 }
10501
10502 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10503 {
10504         struct tg3 *tp = netdev_priv(dev);
10505         struct device *dp = &tp->pdev->dev;
10506
10507         if (wol->wolopts & ~WAKE_MAGIC)
10508                 return -EINVAL;
10509         if ((wol->wolopts & WAKE_MAGIC) &&
10510             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10511                 return -EINVAL;
10512
10513         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10514
10515         spin_lock_bh(&tp->lock);
10516         if (device_may_wakeup(dp))
10517                 tg3_flag_set(tp, WOL_ENABLE);
10518         else
10519                 tg3_flag_clear(tp, WOL_ENABLE);
10520         spin_unlock_bh(&tp->lock);
10521
10522         return 0;
10523 }
10524
/* ethtool get_msglevel: return the driver's message-enable bitmap. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
10530
/* ethtool set_msglevel: set the driver's message-enable bitmap. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
10536
/* ethtool nway_reset: restart link autonegotiation.
 *
 * Not supported on SERDES PHYs driven directly by this driver.  With
 * phylib, the request is deferred to phy_start_aneg().  On the internal
 * PHY, autoneg is only restarted if it is currently enabled (or
 * parallel detection is active); otherwise -EINVAL is returned.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): the first read looks like a deliberate
		 * dummy read (presumably to flush a latched value so the
		 * second read is current) -- confirm against PHY errata
		 * before "simplifying" it away.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
10570
10571 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10572 {
10573         struct tg3 *tp = netdev_priv(dev);
10574
10575         ering->rx_max_pending = tp->rx_std_ring_mask;
10576         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10577                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10578         else
10579                 ering->rx_jumbo_max_pending = 0;
10580
10581         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10582
10583         ering->rx_pending = tp->rx_pending;
10584         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10585                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10586         else
10587                 ering->rx_jumbo_pending = 0;
10588
10589         ering->tx_pending = tp->napi[0].tx_pending;
10590 }
10591
/* ethtool set_ringparam: resize the RX/TX rings.
 * If the interface is running, the device must be halted and the
 * hardware re-initialized for the new sizes to take effect.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* The TX ring must hold at least one maximally-fragmented skb;
	 * chips with the TSO bug need extra headroom for the
	 * segmentation workaround.
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* All TX rings share the same size. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
10637
10638 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10639 {
10640         struct tg3 *tp = netdev_priv(dev);
10641
10642         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10643
10644         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10645                 epause->rx_pause = 1;
10646         else
10647                 epause->rx_pause = 0;
10648
10649         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10650                 epause->tx_pause = 1;
10651         else
10652                 epause->tx_pause = 0;
10653 }
10654
/* ethtool set_pauseparam: configure 802.3x flow control.
 *
 * With phylib, the new pause advertisement is pushed to the PHY and a
 * renegotiation is triggered when needed (or flow control is forced
 * directly when autoneg is off).  Without phylib, the device is halted
 * and restarted so the MAC applies the new configuration.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx settings require Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		/* Map the rx/tx pause pair onto the Pause/Asym_Pause
		 * advertisement bits.
		 */
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet; stash the advertisement
			 * so it is applied when the link comes up.
			 */
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			/* Restart the hardware so the MAC picks up the
			 * new flow-control settings.
			 */
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
10756
10757 static int tg3_get_sset_count(struct net_device *dev, int sset)
10758 {
10759         switch (sset) {
10760         case ETH_SS_TEST:
10761                 return TG3_NUM_TEST;
10762         case ETH_SS_STATS:
10763                 return TG3_NUM_STATS;
10764         default:
10765                 return -EOPNOTSUPP;
10766         }
10767 }
10768
10769 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10770 {
10771         switch (stringset) {
10772         case ETH_SS_STATS:
10773                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10774                 break;
10775         case ETH_SS_TEST:
10776                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10777                 break;
10778         default:
10779                 WARN_ON(1);     /* we need a WARN() */
10780                 break;
10781         }
10782 }
10783
10784 static int tg3_set_phys_id(struct net_device *dev,
10785                             enum ethtool_phys_id_state state)
10786 {
10787         struct tg3 *tp = netdev_priv(dev);
10788
10789         if (!netif_running(tp->dev))
10790                 return -EAGAIN;
10791
10792         switch (state) {
10793         case ETHTOOL_ID_ACTIVE:
10794                 return 1;       /* cycle on/off once per second */
10795
10796         case ETHTOOL_ID_ON:
10797                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10798                      LED_CTRL_1000MBPS_ON |
10799                      LED_CTRL_100MBPS_ON |
10800                      LED_CTRL_10MBPS_ON |
10801                      LED_CTRL_TRAFFIC_OVERRIDE |
10802                      LED_CTRL_TRAFFIC_BLINK |
10803                      LED_CTRL_TRAFFIC_LED);
10804                 break;
10805
10806         case ETHTOOL_ID_OFF:
10807                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10808                      LED_CTRL_TRAFFIC_OVERRIDE);
10809                 break;
10810
10811         case ETHTOOL_ID_INACTIVE:
10812                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10813                 break;
10814         }
10815
10816         return 0;
10817 }
10818
/* ethtool get_ethtool_stats: copy the accumulated hardware statistics
 * into the caller's u64 array (laid out to match ethtool_stats_keys).
 */
static void tg3_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
10825
/* Read the adapter's VPD block, either out of NVRAM (preferring an
 * extended-VPD entry found via the NVRAM directory) or, when NVRAM has
 * no recognizable tg3 image, through the PCI VPD capability.
 *
 * Returns a kmalloc'd buffer (caller must kfree) with its length stored
 * in *vpdlen, or NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length is stored in words, and the
			 * following word holds the data location.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended VPD; fall back to the fixed VPD region. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		/* pci_read_vpd() may return short reads; retry up to
		 * three times, treating timeout/interrupt as 0 bytes.
		 */
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
10901
10902 #define NVRAM_TEST_SIZE 0x100
10903 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10904 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10905 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10906 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10907 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10908 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10909 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10910 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10911
10912 static int tg3_test_nvram(struct tg3 *tp)
10913 {
10914         u32 csum, magic, len;
10915         __be32 *buf;
10916         int i, j, k, err = 0, size;
10917
10918         if (tg3_flag(tp, NO_NVRAM))
10919                 return 0;
10920
10921         if (tg3_nvram_read(tp, 0, &magic) != 0)
10922                 return -EIO;
10923
10924         if (magic == TG3_EEPROM_MAGIC)
10925                 size = NVRAM_TEST_SIZE;
10926         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10927                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10928                     TG3_EEPROM_SB_FORMAT_1) {
10929                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10930                         case TG3_EEPROM_SB_REVISION_0:
10931                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10932                                 break;
10933                         case TG3_EEPROM_SB_REVISION_2:
10934                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10935                                 break;
10936                         case TG3_EEPROM_SB_REVISION_3:
10937                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10938                                 break;
10939                         case TG3_EEPROM_SB_REVISION_4:
10940                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10941                                 break;
10942                         case TG3_EEPROM_SB_REVISION_5:
10943                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10944                                 break;
10945                         case TG3_EEPROM_SB_REVISION_6:
10946                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10947                                 break;
10948                         default:
10949                                 return -EIO;
10950                         }
10951                 } else
10952                         return 0;
10953         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10954                 size = NVRAM_SELFBOOT_HW_SIZE;
10955         else
10956                 return -EIO;
10957
10958         buf = kmalloc(size, GFP_KERNEL);
10959         if (buf == NULL)
10960                 return -ENOMEM;
10961
10962         err = -EIO;
10963         for (i = 0, j = 0; i < size; i += 4, j++) {
10964                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10965                 if (err)
10966                         break;
10967         }
10968         if (i < size)
10969                 goto out;
10970
10971         /* Selfboot format */
10972         magic = be32_to_cpu(buf[0]);
10973         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10974             TG3_EEPROM_MAGIC_FW) {
10975                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10976
10977                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10978                     TG3_EEPROM_SB_REVISION_2) {
10979                         /* For rev 2, the csum doesn't include the MBA. */
10980                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10981                                 csum8 += buf8[i];
10982                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10983                                 csum8 += buf8[i];
10984                 } else {
10985                         for (i = 0; i < size; i++)
10986                                 csum8 += buf8[i];
10987                 }
10988
10989                 if (csum8 == 0) {
10990                         err = 0;
10991                         goto out;
10992                 }
10993
10994                 err = -EIO;
10995                 goto out;
10996         }
10997
10998         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10999             TG3_EEPROM_MAGIC_HW) {
11000                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11001                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11002                 u8 *buf8 = (u8 *) buf;
11003
11004                 /* Separate the parity bits and the data bytes.  */
11005                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11006                         if ((i == 0) || (i == 8)) {
11007                                 int l;
11008                                 u8 msk;
11009
11010                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11011                                         parity[k++] = buf8[i] & msk;
11012                                 i++;
11013                         } else if (i == 16) {
11014                                 int l;
11015                                 u8 msk;
11016
11017                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11018                                         parity[k++] = buf8[i] & msk;
11019                                 i++;
11020
11021                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11022                                         parity[k++] = buf8[i] & msk;
11023                                 i++;
11024                         }
11025                         data[j++] = buf8[i];
11026                 }
11027
11028                 err = -EIO;
11029                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11030                         u8 hw8 = hweight8(data[i]);
11031
11032                         if ((hw8 & 0x1) && parity[i])
11033                                 goto out;
11034                         else if (!(hw8 & 0x1) && !parity[i])
11035                                 goto out;
11036                 }
11037                 err = 0;
11038                 goto out;
11039         }
11040
11041         err = -EIO;
11042
11043         /* Bootstrap checksum at offset 0x10 */
11044         csum = calc_crc((unsigned char *) buf, 0x10);
11045         if (csum != le32_to_cpu(buf[0x10/4]))
11046                 goto out;
11047
11048         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11049         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11050         if (csum != le32_to_cpu(buf[0xfc/4]))
11051                 goto out;
11052
11053         kfree(buf);
11054
11055         buf = tg3_vpd_readblock(tp, &len);
11056         if (!buf)
11057                 return -ENOMEM;
11058
11059         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11060         if (i > 0) {
11061                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11062                 if (j < 0)
11063                         goto out;
11064
11065                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11066                         goto out;
11067
11068                 i += PCI_VPD_LRDT_TAG_SIZE;
11069                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11070                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11071                 if (j > 0) {
11072                         u8 csum8 = 0;
11073
11074                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11075
11076                         for (i = 0; i <= j; i++)
11077                                 csum8 += ((u8 *)buf)[i];
11078
11079                         if (csum8)
11080                                 goto out;
11081                 }
11082         }
11083
11084         err = 0;
11085
11086 out:
11087         kfree(buf);
11088         return err;
11089 }
11090
11091 #define TG3_SERDES_TIMEOUT_SEC  2
11092 #define TG3_COPPER_TIMEOUT_SEC  6
11093
11094 static int tg3_test_link(struct tg3 *tp)
11095 {
11096         int i, max;
11097
11098         if (!netif_running(tp->dev))
11099                 return -ENODEV;
11100
11101         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11102                 max = TG3_SERDES_TIMEOUT_SEC;
11103         else
11104                 max = TG3_COPPER_TIMEOUT_SEC;
11105
11106         for (i = 0; i < max; i++) {
11107                 if (netif_carrier_ok(tp->dev))
11108                         return 0;
11109
11110                 if (msleep_interruptible(1000))
11111                         break;
11112         }
11113
11114         return -EIO;
11115 }
11116
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	/* Table of registers to probe.  read_mask selects bits that are
	 * read-only (must survive writes unchanged); write_mask selects
	 * bits that are read/write (must accept both 0 and 1).  The
	 * flags gate entries to specific ASIC generations.  Terminated
	 * by an offset of 0xffff.
	 */
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	/* Classify the chip so table entries can be filtered below. */
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this ASIC. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Register passed; restore its original contents. */
		tw32(offset, save_val);
	}

	return 0;

out:
	/* A register failed: report it (if hw messages are enabled),
	 * restore its original contents, and fail the whole test.
	 */
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
11337
11338 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11339 {
11340         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11341         int i;
11342         u32 j;
11343
11344         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11345                 for (j = 0; j < len; j += 4) {
11346                         u32 val;
11347
11348                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11349                         tg3_read_mem(tp, offset + j, &val);
11350                         if (val != test_pattern[i])
11351                                 return -EIO;
11352                 }
11353         }
11354         return 0;
11355 }
11356
11357 static int tg3_test_memory(struct tg3 *tp)
11358 {
11359         static struct mem_entry {
11360                 u32 offset;
11361                 u32 len;
11362         } mem_tbl_570x[] = {
11363                 { 0x00000000, 0x00b50},
11364                 { 0x00002000, 0x1c000},
11365                 { 0xffffffff, 0x00000}
11366         }, mem_tbl_5705[] = {
11367                 { 0x00000100, 0x0000c},
11368                 { 0x00000200, 0x00008},
11369                 { 0x00004000, 0x00800},
11370                 { 0x00006000, 0x01000},
11371                 { 0x00008000, 0x02000},
11372                 { 0x00010000, 0x0e000},
11373                 { 0xffffffff, 0x00000}
11374         }, mem_tbl_5755[] = {
11375                 { 0x00000200, 0x00008},
11376                 { 0x00004000, 0x00800},
11377                 { 0x00006000, 0x00800},
11378                 { 0x00008000, 0x02000},
11379                 { 0x00010000, 0x0c000},
11380                 { 0xffffffff, 0x00000}
11381         }, mem_tbl_5906[] = {
11382                 { 0x00000200, 0x00008},
11383                 { 0x00004000, 0x00400},
11384                 { 0x00006000, 0x00400},
11385                 { 0x00008000, 0x01000},
11386                 { 0x00010000, 0x01000},
11387                 { 0xffffffff, 0x00000}
11388         }, mem_tbl_5717[] = {
11389                 { 0x00000200, 0x00008},
11390                 { 0x00010000, 0x0a000},
11391                 { 0x00020000, 0x13c00},
11392                 { 0xffffffff, 0x00000}
11393         }, mem_tbl_57765[] = {
11394                 { 0x00000200, 0x00008},
11395                 { 0x00004000, 0x00800},
11396                 { 0x00006000, 0x09800},
11397                 { 0x00010000, 0x0a000},
11398                 { 0xffffffff, 0x00000}
11399         };
11400         struct mem_entry *mem_tbl;
11401         int err = 0;
11402         int i;
11403
11404         if (tg3_flag(tp, 5717_PLUS))
11405                 mem_tbl = mem_tbl_5717;
11406         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11407                 mem_tbl = mem_tbl_57765;
11408         else if (tg3_flag(tp, 5755_PLUS))
11409                 mem_tbl = mem_tbl_5755;
11410         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11411                 mem_tbl = mem_tbl_5906;
11412         else if (tg3_flag(tp, 5705_PLUS))
11413                 mem_tbl = mem_tbl_5705;
11414         else
11415                 mem_tbl = mem_tbl_570x;
11416
11417         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11418                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11419                 if (err)
11420                         break;
11421         }
11422
11423         return err;
11424 }
11425
11426 #define TG3_TSO_MSS             500
11427
11428 #define TG3_TSO_IP_HDR_LEN      20
11429 #define TG3_TSO_TCP_HDR_LEN     20
11430 #define TG3_TSO_TCP_OPT_LEN     12
11431
/* Canned header template for the TSO loopback test frame: a 2-byte
 * ethertype, a 20-byte IPv4 header and a 32-byte TCP header
 * (TG3_TSO_TCP_HDR_LEN + TG3_TSO_TCP_OPT_LEN bytes of options).
 * It is copied into the test skb right after the two 6-byte MAC
 * addresses; the IP tot_len field is patched at run time in
 * tg3_run_loopback().
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,			/* ethertype: IPv4 */
0x45, 0x00, 0x00, 0x00,		/* IP: version 4 / IHL 5, TOS, tot_len (patched) */
0x00, 0x00, 0x40, 0x00,		/* IP: id, frag_off (DF set) */
0x40, 0x06, 0x00, 0x00,		/* IP: TTL 64, protocol 6 (TCP), checksum 0 */
0x0a, 0x00, 0x00, 0x01,		/* IP: source 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,		/* IP: dest 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,		/* TCP: source port, dest port */
0x00, 0x00, 0x01, 0x00,		/* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,		/* TCP: ack number */
0x80, 0x10, 0x10, 0x00,		/* TCP: data offset 8 (32 bytes), ACK, window */
0x14, 0x09, 0x00, 0x00,		/* TCP: checksum, urgent pointer */
0x01, 0x01, 0x08, 0x0a,		/* TCP opts: NOP, NOP, timestamp (kind 8, len 10) */
0x11, 0x11, 0x11, 0x11,		/* TCP opts: timestamp value (filler) */
0x11, 0x11, 0x11, 0x11,		/* TCP opts: timestamp echo reply (filler) */
};
11448
/* Send one test frame (or one TSO burst) through the currently
 * configured loopback path and verify it is received intact.
 *
 * @pktsz:        total length of the test frame in bytes
 * @tso_loopback: if true, build a TSO packet from tg3_tso_header and
 *                expect it back as multiple TG3_TSO_MSS-sized segments
 *
 * Returns 0 if every expected packet is received with the expected
 * payload, -ENOMEM on skb allocation failure, -EIO otherwise.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* Default to ring 0; with RSS/TSS enabled the first data ring
	 * is napi[1].
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination MAC is our own address; the next 8 bytes
	 * (source MAC + 2) are zeroed.
	 */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* Number of segments the hardware should produce. */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		/* HW-TSO chips want the TCP checksum field zeroed;
		 * the others need the csum-offload descriptor flag.
		 */
		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Encode the header length into mss/base_flags in the
		 * layout this chip generation's TSO engine expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	/* Fill the payload with a predictable byte pattern so the
	 * receive side can be verified below.
	 */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	/* Snapshot the rx producer index before transmitting. */
	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Ring the tx doorbell; the mailbox read flushes the write. */
	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	/* Fail if the tx did not complete or the expected number of
	 * packets did not arrive.
	 */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk each rx completion produced by the test.  val carries
	 * the running payload offset across TSO segments.
	 */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			/* A plain frame must come back whole and from
			 * the ring matching its size.
			 */
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			/* TSO segments must carry a good hw checksum. */
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		/* Compare the received payload against the pattern. */
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_skb->data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
11648
11649 #define TG3_STD_LOOPBACK_FAILED         1
11650 #define TG3_JMB_LOOPBACK_FAILED         2
11651 #define TG3_TSO_LOOPBACK_FAILED         4
11652 #define TG3_LOOPBACK_FAILED \
11653         (TG3_STD_LOOPBACK_FAILED | \
11654          TG3_JMB_LOOPBACK_FAILED | \
11655          TG3_TSO_LOOPBACK_FAILED)
11656
/* Run the loopback self tests.  Failure bits (TG3_*_LOOPBACK_FAILED)
 * accumulate into data[0] for MAC loopback, data[1] for internal PHY
 * loopback and, when @do_extlpbk is set, data[2] for external
 * loopback.  Returns 0 if everything passed, -EIO otherwise.
 *
 * NOTE(review): data[2] is read in the final err computation even
 * when !do_extlpbk, so this relies on the caller having zeroed the
 * data[] array beforehand — verify against tg3_self_test().
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;

	/* Save and mask the EEE capability for the duration of the
	 * test (restored at 'done') — presumably EEE interferes with
	 * loopback; confirm against the EEE setup code.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback is skipped for serdes and phylib-managed PHYs. */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	/* Restore the EEE capability flag saved on entry. */
	tp->phy_flags |= eee_cap;

	return err;
}
11763
/* ethtool self-test handler.  Runs the NVRAM, link, register, memory,
 * loopback and interrupt tests, recording a nonzero value in the
 * corresponding data[] slot and setting ETH_TEST_FL_FAILED in
 * etest->flags on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Bring the chip out of low power before testing; if that fails,
	 * mark every test as failed (any nonzero byte pattern counts).
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	/* The link test is skipped when external loopback was requested. */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before the disruptive tests. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs for offline testing. */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		/* data[4..6] receive the loopback sub-test results. */
		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test runs with the main lock released. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reset and restore normal operation after offline tests. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	/* Return the chip to low power if that is where we found it. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
11851
/* Network device ioctl handler for the MII ioctls (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG).  When phylib manages the PHY, the
 * request is forwarded to phy_mii_ioctl() instead of being handled
 * directly.  Unsupported commands return -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		/* phylib can only service the request once connected. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* Serialize MDIO access with the rest of the driver. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
11908
11909 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11910 {
11911         struct tg3 *tp = netdev_priv(dev);
11912
11913         memcpy(ec, &tp->coal, sizeof(*ec));
11914         return 0;
11915 }
11916
11917 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11918 {
11919         struct tg3 *tp = netdev_priv(dev);
11920         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11921         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11922
11923         if (!tg3_flag(tp, 5705_PLUS)) {
11924                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11925                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11926                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11927                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11928         }
11929
11930         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11931             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11932             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11933             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11934             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11935             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11936             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11937             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11938             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11939             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11940                 return -EINVAL;
11941
11942         /* No rx interrupts will be generated if both are zero */
11943         if ((ec->rx_coalesce_usecs == 0) &&
11944             (ec->rx_max_coalesced_frames == 0))
11945                 return -EINVAL;
11946
11947         /* No tx interrupts will be generated if both are zero */
11948         if ((ec->tx_coalesce_usecs == 0) &&
11949             (ec->tx_max_coalesced_frames == 0))
11950                 return -EINVAL;
11951
11952         /* Only copy relevant parameters, ignore all others. */
11953         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11954         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11955         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11956         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11957         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11958         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11959         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11960         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11961         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11962
11963         if (netif_running(dev)) {
11964                 tg3_full_lock(tp, 0);
11965                 __tg3_set_coalesce(tp, &tp->coal);
11966                 tg3_full_unlock(tp);
11967         }
11968         return 0;
11969 }
11970
/* ethtool operations table exported through the net_device. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings           = tg3_get_settings,
	.set_settings           = tg3_set_settings,
	.get_drvinfo            = tg3_get_drvinfo,
	.get_regs_len           = tg3_get_regs_len,
	.get_regs               = tg3_get_regs,
	.get_wol                = tg3_get_wol,
	.set_wol                = tg3_set_wol,
	.get_msglevel           = tg3_get_msglevel,
	.set_msglevel           = tg3_set_msglevel,
	.nway_reset             = tg3_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = tg3_get_eeprom_len,
	.get_eeprom             = tg3_get_eeprom,
	.set_eeprom             = tg3_set_eeprom,
	.get_ringparam          = tg3_get_ringparam,
	.set_ringparam          = tg3_set_ringparam,
	.get_pauseparam         = tg3_get_pauseparam,
	.set_pauseparam         = tg3_set_pauseparam,
	.self_test              = tg3_self_test,
	.get_strings            = tg3_get_strings,
	.set_phys_id            = tg3_set_phys_id,
	.get_ethtool_stats      = tg3_get_ethtool_stats,
	.get_coalesce           = tg3_get_coalesce,
	.set_coalesce           = tg3_set_coalesce,
	.get_sset_count         = tg3_get_sset_count,
};
11998
11999 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12000 {
12001         u32 cursize, val, magic;
12002
12003         tp->nvram_size = EEPROM_CHIP_SIZE;
12004
12005         if (tg3_nvram_read(tp, 0, &magic) != 0)
12006                 return;
12007
12008         if ((magic != TG3_EEPROM_MAGIC) &&
12009             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12010             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12011                 return;
12012
12013         /*
12014          * Size the chip by reading offsets at increasing powers of two.
12015          * When we encounter our validation signature, we know the addressing
12016          * has wrapped around, and thus have our chip size.
12017          */
12018         cursize = 0x10;
12019
12020         while (cursize < tp->nvram_size) {
12021                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12022                         return;
12023
12024                 if (val == magic)
12025                         break;
12026
12027                 cursize <<= 1;
12028         }
12029
12030         tp->nvram_size = cursize;
12031 }
12032
/* Determine total NVRAM size.  Images without the standard magic are
 * sized via tg3_get_eeprom_size(); otherwise the size in KB is taken
 * from the 16-bit field at offset 0xf2, falling back to 512KB.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
12065
/* Decode NVRAM_CFG1 to identify the NVRAM vendor, page size and
 * buffering.  Only 5750 and 5780-class devices carry a vendor field;
 * all other chips default to a buffered Atmel part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: clear compatibility bypass mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
12116
12117 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12118 {
12119         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12120         case FLASH_5752PAGE_SIZE_256:
12121                 tp->nvram_pagesize = 256;
12122                 break;
12123         case FLASH_5752PAGE_SIZE_512:
12124                 tp->nvram_pagesize = 512;
12125                 break;
12126         case FLASH_5752PAGE_SIZE_1K:
12127                 tp->nvram_pagesize = 1024;
12128                 break;
12129         case FLASH_5752PAGE_SIZE_2K:
12130                 tp->nvram_pagesize = 2048;
12131                 break;
12132         case FLASH_5752PAGE_SIZE_4K:
12133                 tp->nvram_pagesize = 4096;
12134                 break;
12135         case FLASH_5752PAGE_SIZE_264:
12136                 tp->nvram_pagesize = 264;
12137                 break;
12138         case FLASH_5752PAGE_SIZE_528:
12139                 tp->nvram_pagesize = 528;
12140                 break;
12141         }
12142 }
12143
/* Decode NVRAM_CFG1 for 5752 devices: vendor, buffering, flash vs.
 * EEPROM, and page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: clear compatibility bypass mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
12184
/* Decode NVRAM_CFG1 for 5755 devices: vendor, page size and total
 * size.  When TPM protection is enabled (bit 27), a reduced usable
 * size is reported.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
12240
/* Decode NVRAM_CFG1 for 5787 devices: vendor, buffering and page size. */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: clear compatibility bypass mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
12278
/* Decode NVRAM_CFG1 for 5761 devices.  Protected (TPM) parts read
 * their usable size from the NVRAM_ADDR_LOCKOUT register; otherwise
 * the size is derived from the vendor/device encoding.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
12353
12354 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12355 {
12356         tp->nvram_jedecnum = JEDEC_ATMEL;
12357         tg3_flag_set(tp, NVRAM_BUFFERED);
12358         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12359 }
12360
/* Decode NVRAM_CFG1 for 57780 devices: vendor, total size and page
 * size.  An unrecognized vendor encoding marks the device as having
 * no NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: clear compatibility bypass mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size by specific Atmel device. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size by specific ST device. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only used with 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12432
12433
/* Decode NVRAM_CFG1 for 5717 devices: vendor, total size and page
 * size.  An unrecognized vendor encoding marks the device as having
 * no NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: clear compatibility bypass mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size by specific Atmel device. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size by specific ST device. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only used with 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12511
/* Decode NVRAM strapping for 5720-class devices.
 *
 * Same job as tg3_get_5717_nvram_info() for the 5720 strap encodings:
 * fills in JEDEC vendor, BUFFERED/FLASH flags, page size and, where the
 * strap pins encode it, the total NVRAM size.  Unknown strappings set
 * the NO_NVRAM flag.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		/* EEPROM parts: clear the flash compatibility bypass and
		 * return early, skipping the flash page-size probe below.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* The strap bits also encode the device capacity. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		/* Unrecognized strapping - no NVRAM we know how to drive. */
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte page parts use NVRAM address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12623
12624 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12625 static void __devinit tg3_nvram_init(struct tg3 *tp)
12626 {
12627         tw32_f(GRC_EEPROM_ADDR,
12628              (EEPROM_ADDR_FSM_RESET |
12629               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12630                EEPROM_ADDR_CLKPERD_SHIFT)));
12631
12632         msleep(1);
12633
12634         /* Enable seeprom accesses. */
12635         tw32_f(GRC_LOCAL_CTRL,
12636              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12637         udelay(100);
12638
12639         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12640             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12641                 tg3_flag_set(tp, NVRAM);
12642
12643                 if (tg3_nvram_lock(tp)) {
12644                         netdev_warn(tp->dev,
12645                                     "Cannot get nvram lock, %s failed\n",
12646                                     __func__);
12647                         return;
12648                 }
12649                 tg3_enable_nvram_access(tp);
12650
12651                 tp->nvram_size = 0;
12652
12653                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12654                         tg3_get_5752_nvram_info(tp);
12655                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12656                         tg3_get_5755_nvram_info(tp);
12657                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12658                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12659                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12660                         tg3_get_5787_nvram_info(tp);
12661                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12662                         tg3_get_5761_nvram_info(tp);
12663                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12664                         tg3_get_5906_nvram_info(tp);
12665                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12666                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12667                         tg3_get_57780_nvram_info(tp);
12668                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12669                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12670                         tg3_get_5717_nvram_info(tp);
12671                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12672                         tg3_get_5720_nvram_info(tp);
12673                 else
12674                         tg3_get_nvram_info(tp);
12675
12676                 if (tp->nvram_size == 0)
12677                         tg3_get_nvram_size(tp);
12678
12679                 tg3_disable_nvram_access(tp);
12680                 tg3_nvram_unlock(tp);
12681
12682         } else {
12683                 tg3_flag_clear(tp, NVRAM);
12684                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12685
12686                 tg3_get_eeprom_size(tp);
12687         }
12688 }
12689
12690 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12691                                     u32 offset, u32 len, u8 *buf)
12692 {
12693         int i, j, rc = 0;
12694         u32 val;
12695
12696         for (i = 0; i < len; i += 4) {
12697                 u32 addr;
12698                 __be32 data;
12699
12700                 addr = offset + i;
12701
12702                 memcpy(&data, buf + i, 4);
12703
12704                 /*
12705                  * The SEEPROM interface expects the data to always be opposite
12706                  * the native endian format.  We accomplish this by reversing
12707                  * all the operations that would have been performed on the
12708                  * data from a call to tg3_nvram_read_be32().
12709                  */
12710                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12711
12712                 val = tr32(GRC_EEPROM_ADDR);
12713                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12714
12715                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12716                         EEPROM_ADDR_READ);
12717                 tw32(GRC_EEPROM_ADDR, val |
12718                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12719                         (addr & EEPROM_ADDR_ADDR_MASK) |
12720                         EEPROM_ADDR_START |
12721                         EEPROM_ADDR_WRITE);
12722
12723                 for (j = 0; j < 1000; j++) {
12724                         val = tr32(GRC_EEPROM_ADDR);
12725
12726                         if (val & EEPROM_ADDR_COMPLETE)
12727                                 break;
12728                         msleep(1);
12729                 }
12730                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12731                         rc = -EBUSY;
12732                         break;
12733                 }
12734         }
12735
12736         return rc;
12737 }
12738
12739 /* offset and length are dword aligned */
12740 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12741                 u8 *buf)
12742 {
12743         int ret = 0;
12744         u32 pagesize = tp->nvram_pagesize;
12745         u32 pagemask = pagesize - 1;
12746         u32 nvram_cmd;
12747         u8 *tmp;
12748
12749         tmp = kmalloc(pagesize, GFP_KERNEL);
12750         if (tmp == NULL)
12751                 return -ENOMEM;
12752
12753         while (len) {
12754                 int j;
12755                 u32 phy_addr, page_off, size;
12756
12757                 phy_addr = offset & ~pagemask;
12758
12759                 for (j = 0; j < pagesize; j += 4) {
12760                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12761                                                   (__be32 *) (tmp + j));
12762                         if (ret)
12763                                 break;
12764                 }
12765                 if (ret)
12766                         break;
12767
12768                 page_off = offset & pagemask;
12769                 size = pagesize;
12770                 if (len < size)
12771                         size = len;
12772
12773                 len -= size;
12774
12775                 memcpy(tmp + page_off, buf, size);
12776
12777                 offset = offset + (pagesize - page_off);
12778
12779                 tg3_enable_nvram_access(tp);
12780
12781                 /*
12782                  * Before we can erase the flash page, we need
12783                  * to issue a special "write enable" command.
12784                  */
12785                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12786
12787                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12788                         break;
12789
12790                 /* Erase the target page */
12791                 tw32(NVRAM_ADDR, phy_addr);
12792
12793                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12794                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12795
12796                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12797                         break;
12798
12799                 /* Issue another write enable to start the write. */
12800                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12801
12802                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12803                         break;
12804
12805                 for (j = 0; j < pagesize; j += 4) {
12806                         __be32 data;
12807
12808                         data = *((__be32 *) (tmp + j));
12809
12810                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12811
12812                         tw32(NVRAM_ADDR, phy_addr + j);
12813
12814                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12815                                 NVRAM_CMD_WR;
12816
12817                         if (j == 0)
12818                                 nvram_cmd |= NVRAM_CMD_FIRST;
12819                         else if (j == (pagesize - 4))
12820                                 nvram_cmd |= NVRAM_CMD_LAST;
12821
12822                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12823                                 break;
12824                 }
12825                 if (ret)
12826                         break;
12827         }
12828
12829         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12830         tg3_nvram_exec_cmd(tp, nvram_cmd);
12831
12832         kfree(tmp);
12833
12834         return ret;
12835 }
12836
12837 /* offset and length are dword aligned */
12838 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12839                 u8 *buf)
12840 {
12841         int i, ret = 0;
12842
12843         for (i = 0; i < len; i += 4, offset += 4) {
12844                 u32 page_off, phy_addr, nvram_cmd;
12845                 __be32 data;
12846
12847                 memcpy(&data, buf + i, 4);
12848                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12849
12850                 page_off = offset % tp->nvram_pagesize;
12851
12852                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12853
12854                 tw32(NVRAM_ADDR, phy_addr);
12855
12856                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12857
12858                 if (page_off == 0 || i == 0)
12859                         nvram_cmd |= NVRAM_CMD_FIRST;
12860                 if (page_off == (tp->nvram_pagesize - 4))
12861                         nvram_cmd |= NVRAM_CMD_LAST;
12862
12863                 if (i == (len - 4))
12864                         nvram_cmd |= NVRAM_CMD_LAST;
12865
12866                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12867                     !tg3_flag(tp, 5755_PLUS) &&
12868                     (tp->nvram_jedecnum == JEDEC_ST) &&
12869                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12870
12871                         if ((ret = tg3_nvram_exec_cmd(tp,
12872                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12873                                 NVRAM_CMD_DONE)))
12874
12875                                 break;
12876                 }
12877                 if (!tg3_flag(tp, FLASH)) {
12878                         /* We always do complete word writes to eeprom. */
12879                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12880                 }
12881
12882                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12883                         break;
12884         }
12885         return ret;
12886 }
12887
12888 /* offset and length are dword aligned */
12889 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12890 {
12891         int ret;
12892
12893         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12894                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12895                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12896                 udelay(40);
12897         }
12898
12899         if (!tg3_flag(tp, NVRAM)) {
12900                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12901         } else {
12902                 u32 grc_mode;
12903
12904                 ret = tg3_nvram_lock(tp);
12905                 if (ret)
12906                         return ret;
12907
12908                 tg3_enable_nvram_access(tp);
12909                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12910                         tw32(NVRAM_WRITE1, 0x406);
12911
12912                 grc_mode = tr32(GRC_MODE);
12913                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12914
12915                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12916                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12917                                 buf);
12918                 } else {
12919                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12920                                 buf);
12921                 }
12922
12923                 grc_mode = tr32(GRC_MODE);
12924                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12925
12926                 tg3_disable_nvram_access(tp);
12927                 tg3_nvram_unlock(tp);
12928         }
12929
12930         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12931                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12932                 udelay(40);
12933         }
12934
12935         return ret;
12936 }
12937
/* One entry of the board table below: maps a PCI subsystem
 * vendor/device ID pair to the PHY ID that board is known to carry.
 * A phy_id of 0 marks boards with no recorded PHY ID (apparently the
 * fiber/serdes variants in the table) - confirm against callers.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
12942
/* Known board table, searched by tg3_lookup_by_subsys().  Used when the
 * PHY ID cannot be determined by other means at probe time.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
13006
13007 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13008 {
13009         int i;
13010
13011         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13012                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13013                      tp->pdev->subsystem_vendor) &&
13014                     (subsys_id_to_phy_id[i].subsys_devid ==
13015                      tp->pdev->subsystem_device))
13016                         return &subsys_id_to_phy_id[i];
13017         }
13018         return NULL;
13019 }
13020
/* Read the hardware configuration the bootcode left for us - NIC SRAM
 * on most chips, the VCPU shadow register on 5906 - and translate it
 * into tp->phy_id, LED mode, and the WOL/ASF/APE/serdes related flags.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: config comes from the VCPU shadow register
		 * instead of NIC SRAM.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	/* Only trust SRAM contents carrying the bootcode signature. */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only read for bootcode versions in (0, 0x100)
		 * on chips newer than 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Re-pack the SRAM encoding into the driver's
			 * phy_id layout.
			 */
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* PHY_1/PHY_2 bits are skipped on 5750 A0/A1. */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards are forced to PHY_2 LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* NOTE(review): these two Arima subsystem IDs set
			 * the WP strap but are treated as writable; the
			 * rationale is not visible here.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes devices without the fiber-WOL strap lose WOL. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Tell the PM core whether this device can wake the system. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
13220
13221 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13222 {
13223         int i;
13224         u32 val;
13225
13226         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13227         tw32(OTP_CTRL, cmd);
13228
13229         /* Wait for up to 1 ms for command to execute. */
13230         for (i = 0; i < 100; i++) {
13231                 val = tr32(OTP_STATUS);
13232                 if (val & OTP_STATUS_CMD_DONE)
13233                         break;
13234                 udelay(10);
13235         }
13236
13237         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13238 }
13239
13240 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13241  * configuration is a 32-bit value that straddles the alignment boundary.
13242  * We do two 32-bit reads and then shift and merge the results.
13243  */
13244 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13245 {
13246         u32 bhalf_otp, thalf_otp;
13247
13248         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13249
13250         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13251                 return 0;
13252
13253         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13254
13255         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13256                 return 0;
13257
13258         thalf_otp = tr32(OTP_READ_DATA);
13259
13260         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13261
13262         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13263                 return 0;
13264
13265         bhalf_otp = tr32(OTP_READ_DATA);
13266
13267         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13268 }
13269
13270 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13271 {
13272         u32 adv = ADVERTISED_Autoneg |
13273                   ADVERTISED_Pause;
13274
13275         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13276                 adv |= ADVERTISED_1000baseT_Half |
13277                        ADVERTISED_1000baseT_Full;
13278
13279         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13280                 adv |= ADVERTISED_100baseT_Half |
13281                        ADVERTISED_100baseT_Full |
13282                        ADVERTISED_10baseT_Half |
13283                        ADVERTISED_10baseT_Full |
13284                        ADVERTISED_TP;
13285         else
13286                 adv |= ADVERTISED_FIBRE;
13287
13288         tp->link_config.advertising = adv;
13289         tp->link_config.speed = SPEED_INVALID;
13290         tp->link_config.duplex = DUPLEX_INVALID;
13291         tp->link_config.autoneg = AUTONEG_ENABLE;
13292         tp->link_config.active_speed = SPEED_INVALID;
13293         tp->link_config.active_duplex = DUPLEX_INVALID;
13294         tp->link_config.orig_speed = SPEED_INVALID;
13295         tp->link_config.orig_duplex = DUPLEX_INVALID;
13296         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13297 }
13298
/* Determine which PHY is attached and record its ID and capability
 * flags.  Falls back to a hardcoded subsystem-ID table when the PHY ID
 * registers cannot be trusted (ASF/APE firmware may own the PHY).
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* When phylib manages the PHY, delegate entirely. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack PHYSID1/PHYSID2 into the driver's internal
		 * phy_id layout (see TG3_PHY_ID_* constants).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		/* BCM8002 is a serdes part; everything else here is copper. */
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark EEE capability on the copper chips/steppings listed
	 * below (5719, 5720, non-A0 5718, non-A0 57765).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		/* Double read: BMSR link status latches low per the MII
		 * convention, so the second read reflects current state
		 * -- presumably why it is read twice; TODO confirm.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* If not advertising everything we can, reprogram the
		 * advertisement and restart autonegotiation.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is issued twice for 5401;
		 * looks intentional -- confirm against Broadcom errata.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13412
/* Fill in tp->board_part_number (and, on some boards, a firmware
 * version prefix in tp->fw_ver) from the device's PCI VPD.  If no
 * usable VPD is found, fall back to a name derived from the PCI
 * device ID.
 */
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	/* tg3_vpd_readblock() allocates vpd_data; freed at out_not_found. */
	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD resource. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		/* "1028" is Dell's PCI vendor ID -- presumably only
		 * Dell boards carry a firmware version in V0; verify.
		 */
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Clamp so the string and its terminator fit fw_ver. */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	/* Board part number ("PN" keyword) in the read-only section. */
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No part number in VPD: synthesize one from the device ID. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
13524
13525 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13526 {
13527         u32 val;
13528
13529         if (tg3_nvram_read(tp, offset, &val) ||
13530             (val & 0xfc000000) != 0x0c000000 ||
13531             tg3_nvram_read(tp, offset + 4, &val) ||
13532             val != 0)
13533                 return 0;
13534
13535         return 1;
13536 }
13537
/* Read the bootcode version from NVRAM and append it to tp->fw_ver.
 * Newer images carry a 16-byte version string inside the image; older
 * ones encode a major/minor pair at TG3_NVM_PTREV_BCVER.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* NVRAM word 0xc: image offset; word 0x4: load address
	 * (presumably -- matches how they are combined below).
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Magic 0x0c000000 followed by a zero word marks the newer
	 * layout (same signature tg3_fw_img_is_valid() checks).
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever is already in fw_ver. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the load address; convert
		 * back to an NVRAM offset.
		 */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13589
13590 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13591 {
13592         u32 val, major, minor;
13593
13594         /* Use native endian representation */
13595         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13596                 return;
13597
13598         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13599                 TG3_NVM_HWSB_CFG1_MAJSFT;
13600         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13601                 TG3_NVM_HWSB_CFG1_MINSFT;
13602
13603         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13604 }
13605
13606 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13607 {
13608         u32 offset, major, minor, build;
13609
13610         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13611
13612         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13613                 return;
13614
13615         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13616         case TG3_EEPROM_SB_REVISION_0:
13617                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13618                 break;
13619         case TG3_EEPROM_SB_REVISION_2:
13620                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13621                 break;
13622         case TG3_EEPROM_SB_REVISION_3:
13623                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13624                 break;
13625         case TG3_EEPROM_SB_REVISION_4:
13626                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13627                 break;
13628         case TG3_EEPROM_SB_REVISION_5:
13629                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13630                 break;
13631         case TG3_EEPROM_SB_REVISION_6:
13632                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13633                 break;
13634         default:
13635                 return;
13636         }
13637
13638         if (tg3_nvram_read(tp, offset, &val))
13639                 return;
13640
13641         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13642                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13643         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13644                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13645         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13646
13647         if (minor > 99 || build > 26)
13648                 return;
13649
13650         offset = strlen(tp->fw_ver);
13651         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13652                  " v%d.%02d", major, minor);
13653
13654         if (build > 0) {
13655                 offset = strlen(tp->fw_ver);
13656                 if (offset < TG3_VER_SIZE - 1)
13657                         tp->fw_ver[offset] = 'a' + build - 1;
13658         }
13659 }
13660
13661 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13662 {
13663         u32 val, offset, start;
13664         int i, vlen;
13665
13666         for (offset = TG3_NVM_DIR_START;
13667              offset < TG3_NVM_DIR_END;
13668              offset += TG3_NVM_DIRENT_SIZE) {
13669                 if (tg3_nvram_read(tp, offset, &val))
13670                         return;
13671
13672                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13673                         break;
13674         }
13675
13676         if (offset == TG3_NVM_DIR_END)
13677                 return;
13678
13679         if (!tg3_flag(tp, 5705_PLUS))
13680                 start = 0x08000000;
13681         else if (tg3_nvram_read(tp, offset - 4, &start))
13682                 return;
13683
13684         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13685             !tg3_fw_img_is_valid(tp, offset) ||
13686             tg3_nvram_read(tp, offset + 8, &val))
13687                 return;
13688
13689         offset += val - start;
13690
13691         vlen = strlen(tp->fw_ver);
13692
13693         tp->fw_ver[vlen++] = ',';
13694         tp->fw_ver[vlen++] = ' ';
13695
13696         for (i = 0; i < 4; i++) {
13697                 __be32 v;
13698                 if (tg3_nvram_read_be32(tp, offset, &v))
13699                         return;
13700
13701                 offset += sizeof(v);
13702
13703                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13704                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13705                         break;
13706                 }
13707
13708                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13709                 vlen += sizeof(v);
13710         }
13711 }
13712
13713 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13714 {
13715         int vlen;
13716         u32 apedata;
13717         char *fwtype;
13718
13719         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13720                 return;
13721
13722         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13723         if (apedata != APE_SEG_SIG_MAGIC)
13724                 return;
13725
13726         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13727         if (!(apedata & APE_FW_STATUS_READY))
13728                 return;
13729
13730         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13731
13732         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13733                 tg3_flag_set(tp, APE_HAS_NCSI);
13734                 fwtype = "NCSI";
13735         } else {
13736                 fwtype = "DASH";
13737         }
13738
13739         vlen = strlen(tp->fw_ver);
13740
13741         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13742                  fwtype,
13743                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13744                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13745                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13746                  (apedata & APE_FW_VERSION_BLDMSK));
13747 }
13748
13749 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13750 {
13751         u32 val;
13752         bool vpd_vers = false;
13753
13754         if (tp->fw_ver[0] != 0)
13755                 vpd_vers = true;
13756
13757         if (tg3_flag(tp, NO_NVRAM)) {
13758                 strcat(tp->fw_ver, "sb");
13759                 return;
13760         }
13761
13762         if (tg3_nvram_read(tp, 0, &val))
13763                 return;
13764
13765         if (val == TG3_EEPROM_MAGIC)
13766                 tg3_read_bc_ver(tp);
13767         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13768                 tg3_read_sb_ver(tp, val);
13769         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13770                 tg3_read_hwsb_ver(tp);
13771         else
13772                 return;
13773
13774         if (vpd_vers)
13775                 goto done;
13776
13777         if (tg3_flag(tp, ENABLE_APE)) {
13778                 if (tg3_flag(tp, ENABLE_ASF))
13779                         tg3_read_dash_ver(tp);
13780         } else if (tg3_flag(tp, ENABLE_ASF)) {
13781                 tg3_read_mgmtfw_ver(tp);
13782         }
13783
13784 done:
13785         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13786 }
13787
13788 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13789
13790 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13791 {
13792         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13793                 return TG3_RX_RET_MAX_SIZE_5717;
13794         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13795                 return TG3_RX_RET_MAX_SIZE_5700;
13796         else
13797                 return TG3_RX_RET_MAX_SIZE_5705;
13798 }
13799
/* Host bridge IDs matched against this table elsewhere in the driver;
 * the name indicates these chipsets reorder posted PCI writes and need
 * a workaround -- not visible in this chunk, verify at the use site.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
13806
13807 static int __devinit tg3_get_invariants(struct tg3 *tp)
13808 {
13809         u32 misc_ctrl_reg;
13810         u32 pci_state_reg, grc_misc_cfg;
13811         u32 val;
13812         u16 pci_cmd;
13813         int err;
13814
13815         /* Force memory write invalidate off.  If we leave it on,
13816          * then on 5700_BX chips we have to enable a workaround.
13817          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13818          * to match the cacheline size.  The Broadcom driver have this
13819          * workaround but turns MWI off all the times so never uses
13820          * it.  This seems to suggest that the workaround is insufficient.
13821          */
13822         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13823         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13824         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13825
13826         /* Important! -- Make sure register accesses are byteswapped
13827          * correctly.  Also, for those chips that require it, make
13828          * sure that indirect register accesses are enabled before
13829          * the first operation.
13830          */
13831         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13832                               &misc_ctrl_reg);
13833         tp->misc_host_ctrl |= (misc_ctrl_reg &
13834                                MISC_HOST_CTRL_CHIPREV);
13835         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13836                                tp->misc_host_ctrl);
13837
13838         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13839                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13840         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13841                 u32 prod_id_asic_rev;
13842
13843                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13844                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13845                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13846                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13847                         pci_read_config_dword(tp->pdev,
13848                                               TG3PCI_GEN2_PRODID_ASICREV,
13849                                               &prod_id_asic_rev);
13850                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13851                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13852                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13853                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13854                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13855                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13856                         pci_read_config_dword(tp->pdev,
13857                                               TG3PCI_GEN15_PRODID_ASICREV,
13858                                               &prod_id_asic_rev);
13859                 else
13860                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13861                                               &prod_id_asic_rev);
13862
13863                 tp->pci_chip_rev_id = prod_id_asic_rev;
13864         }
13865
13866         /* Wrong chip ID in 5752 A0. This code can be removed later
13867          * as A0 is not in production.
13868          */
13869         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13870                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13871
13872         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13873          * we need to disable memory and use config. cycles
13874          * only to access all registers. The 5702/03 chips
13875          * can mistakenly decode the special cycles from the
13876          * ICH chipsets as memory write cycles, causing corruption
13877          * of register and memory space. Only certain ICH bridges
13878          * will drive special cycles with non-zero data during the
13879          * address phase which can fall within the 5703's address
13880          * range. This is not an ICH bug as the PCI spec allows
13881          * non-zero address during special cycles. However, only
13882          * these ICH bridges are known to drive non-zero addresses
13883          * during special cycles.
13884          *
13885          * Since special cycles do not cross PCI bridges, we only
13886          * enable this workaround if the 5703 is on the secondary
13887          * bus of these ICH bridges.
13888          */
13889         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13890             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13891                 static struct tg3_dev_id {
13892                         u32     vendor;
13893                         u32     device;
13894                         u32     rev;
13895                 } ich_chipsets[] = {
13896                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13897                           PCI_ANY_ID },
13898                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13899                           PCI_ANY_ID },
13900                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13901                           0xa },
13902                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13903                           PCI_ANY_ID },
13904                         { },
13905                 };
13906                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13907                 struct pci_dev *bridge = NULL;
13908
13909                 while (pci_id->vendor != 0) {
13910                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13911                                                 bridge);
13912                         if (!bridge) {
13913                                 pci_id++;
13914                                 continue;
13915                         }
13916                         if (pci_id->rev != PCI_ANY_ID) {
13917                                 if (bridge->revision > pci_id->rev)
13918                                         continue;
13919                         }
13920                         if (bridge->subordinate &&
13921                             (bridge->subordinate->number ==
13922                              tp->pdev->bus->number)) {
13923                                 tg3_flag_set(tp, ICH_WORKAROUND);
13924                                 pci_dev_put(bridge);
13925                                 break;
13926                         }
13927                 }
13928         }
13929
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* A 5701 sitting behind an Intel PXH PCI-X bridge needs the
		 * 5701_DMA_BUG workaround.  Scan every PXH bridge in the
		 * system and check whether our bus number falls inside the
		 * bridge's subordinate bus range.
		 */
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },	/* zero vendor terminates the table */
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			/* pci_get_device() releases the reference held on the
			 * 'bridge' passed in and returns the next match with
			 * a fresh reference, so no explicit put is needed
			 * while iterating.
			 */
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				/* Exhausted this ID; move to the next entry. */
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				/* Our device is behind this PXH bridge. */
				tg3_flag_set(tp, 5701_DMA_BUG);
				/* Drop the reference before leaving the loop. */
				pci_dev_put(bridge);
				break;
			}
		}
	}
13961
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5780-class parts embed the EPB bridge themselves, so the
		 * workaround is unconditional; also cache the MSI capability
		 * offset here for later use.
		 */
		tg3_flag_set(tp, 5780_CLASS);
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		/* Not a 5780-class device: check whether we nevertheless sit
		 * behind a discrete ServerWorks EPB bridge (e.g. multi-port
		 * NIC designs) by matching our bus number against each EPB's
		 * subordinate bus range.
		 */
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				/* Release the reference taken by
				 * pci_get_device() before breaking out.
				 */
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}
13991
13992         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13993             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13994                 tp->pdev_peer = tg3_find_peer(tp);
13995
13996         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13997             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13998             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13999                 tg3_flag_set(tp, 5717_PLUS);
14000
14001         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14002             tg3_flag(tp, 5717_PLUS))
14003                 tg3_flag_set(tp, 57765_PLUS);
14004
14005         /* Intentionally exclude ASIC_REV_5906 */
14006         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14007             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14008             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14009             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14010             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14011             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14012             tg3_flag(tp, 57765_PLUS))
14013                 tg3_flag_set(tp, 5755_PLUS);
14014
14015         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14016             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14017             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14018             tg3_flag(tp, 5755_PLUS) ||
14019             tg3_flag(tp, 5780_CLASS))
14020                 tg3_flag_set(tp, 5750_PLUS);
14021
14022         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14023             tg3_flag(tp, 5750_PLUS))
14024                 tg3_flag_set(tp, 5705_PLUS);
14025
	/* Determine TSO capabilities.  Newest hardware first: HW_TSO_3,
	 * then HW_TSO_2, then HW_TSO_1 (with the 5750 TSO bug caveat),
	 * and finally firmware-based TSO for the oldest parts that
	 * support it at all.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		/* 5750 C2 and later do not have the TSO bug. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
			/* NOTE(review): the extra indent on the next line is
			 * misleading; it is part of this else-if body, not a
			 * nested block.  Firmware TSO path: mark the bug and
			 * pick the firmware image by ASIC family.
			 */
			tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tp->fw_needed) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		/* No TSO of any kind: clear related flags and drop any
		 * firmware requirement recorded above.
		 */
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}
14065
14066         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14067                 tp->fw_needed = FIRMWARE_TG3;
14068
14069         tp->irq_max = 1;
14070
14071         if (tg3_flag(tp, 5750_PLUS)) {
14072                 tg3_flag_set(tp, SUPPORT_MSI);
14073                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14074                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14075                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14076                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14077                      tp->pdev_peer == tp->pdev))
14078                         tg3_flag_clear(tp, SUPPORT_MSI);
14079
14080                 if (tg3_flag(tp, 5755_PLUS) ||
14081                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14082                         tg3_flag_set(tp, 1SHOT_MSI);
14083                 }
14084
14085                 if (tg3_flag(tp, 57765_PLUS)) {
14086                         tg3_flag_set(tp, SUPPORT_MSIX);
14087                         tp->irq_max = TG3_IRQ_MAX_VECS;
14088                 }
14089         }
14090
14091         if (tg3_flag(tp, 5755_PLUS) ||
14092             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14093                 tg3_flag_set(tp, SHORT_DMA_BUG);
14094
14095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14096                 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14097
14098         if (tg3_flag(tp, 5717_PLUS))
14099                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14100
14101         if (tg3_flag(tp, 57765_PLUS) &&
14102             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14103                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14104
14105         if (!tg3_flag(tp, 5705_PLUS) ||
14106             tg3_flag(tp, 5780_CLASS) ||
14107             tg3_flag(tp, USE_JUMBO_BDFLAG))
14108                 tg3_flag_set(tp, JUMBO_CAPABLE);
14109
14110         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14111                               &pci_state_reg);
14112
14113         if (pci_is_pcie(tp->pdev)) {
14114                 u16 lnkctl;
14115
14116                 tg3_flag_set(tp, PCI_EXPRESS);
14117
14118                 tp->pcie_readrq = 4096;
14119                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14120                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14121                         tp->pcie_readrq = 2048;
14122
14123                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14124
14125                 pci_read_config_word(tp->pdev,
14126                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14127                                      &lnkctl);
14128                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14129                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14130                             ASIC_REV_5906) {
14131                                 tg3_flag_clear(tp, HW_TSO_2);
14132                                 tg3_flag_clear(tp, TSO_CAPABLE);
14133                         }
14134                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14135                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14136                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14137                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14138                                 tg3_flag_set(tp, CLKREQ_BUG);
14139                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14140                         tg3_flag_set(tp, L1PLLPD_EN);
14141                 }
14142         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14143                 /* BCM5785 devices are effectively PCIe devices, and should
14144                  * follow PCIe codepaths, but do not have a PCIe capabilities
14145                  * section.
14146                  */
14147                 tg3_flag_set(tp, PCI_EXPRESS);
14148         } else if (!tg3_flag(tp, 5705_PLUS) ||
14149                    tg3_flag(tp, 5780_CLASS)) {
14150                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14151                 if (!tp->pcix_cap) {
14152                         dev_err(&tp->pdev->dev,
14153                                 "Cannot find PCI-X capability, aborting\n");
14154                         return -EIO;
14155                 }
14156
14157                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14158                         tg3_flag_set(tp, PCIX_MODE);
14159         }
14160
14161         /* If we have an AMD 762 or VIA K8T800 chipset, write
14162          * reordering to the mailbox registers done by the host
14163          * controller can cause major troubles.  We read back from
14164          * every mailbox register write to force the writes to be
14165          * posted to the chip in order.
14166          */
14167         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14168             !tg3_flag(tp, PCI_EXPRESS))
14169                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14170
14171         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14172                              &tp->pci_cacheline_sz);
14173         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14174                              &tp->pci_lat_timer);
14175         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14176             tp->pci_lat_timer < 64) {
14177                 tp->pci_lat_timer = 64;
14178                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14179                                       tp->pci_lat_timer);
14180         }
14181
	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			/* Clear the power-state field (0 == D0) and keep
			 * PME enabled.
			 */
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}
14220
14221         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14222                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14223         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14224                 tg3_flag_set(tp, PCI_32BIT);
14225
14226         /* Chip-specific fixup from Broadcom driver */
14227         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14228             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14229                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14230                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14231         }
14232
14233         /* Default fast path register access methods */
14234         tp->read32 = tg3_read32;
14235         tp->write32 = tg3_write32;
14236         tp->read32_mbox = tg3_read32;
14237         tp->write32_mbox = tg3_write32;
14238         tp->write32_tx_mbox = tg3_write32;
14239         tp->write32_rx_mbox = tg3_write32;
14240
14241         /* Various workaround register access methods */
14242         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14243                 tp->write32 = tg3_write_indirect_reg32;
14244         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14245                  (tg3_flag(tp, PCI_EXPRESS) &&
14246                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14247                 /*
14248                  * Back to back register writes can cause problems on these
14249                  * chips, the workaround is to read back all reg writes
14250                  * except those to mailbox regs.
14251                  *
14252                  * See tg3_write_indirect_reg32().
14253                  */
14254                 tp->write32 = tg3_write_flush_reg32;
14255         }
14256
14257         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14258                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14259                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14260                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14261         }
14262
14263         if (tg3_flag(tp, ICH_WORKAROUND)) {
14264                 tp->read32 = tg3_read_indirect_reg32;
14265                 tp->write32 = tg3_write_indirect_reg32;
14266                 tp->read32_mbox = tg3_read_indirect_mbox;
14267                 tp->write32_mbox = tg3_write_indirect_mbox;
14268                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14269                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14270
14271                 iounmap(tp->regs);
14272                 tp->regs = NULL;
14273
14274                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14275                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14276                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14277         }
14278         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14279                 tp->read32_mbox = tg3_read32_mbox_5906;
14280                 tp->write32_mbox = tg3_write32_mbox_5906;
14281                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14282                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14283         }
14284
14285         if (tp->write32 == tg3_write_indirect_reg32 ||
14286             (tg3_flag(tp, PCIX_MODE) &&
14287              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14288               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14289                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14290
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Determine which PCI function this port is.  Default to the
	 * low bits of the PCI devfn; some families below override it
	 * from hardware status instead.
	 */
	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			/* In PCI-X mode, take the function number from the
			 * PCI-X status register instead of devfn.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		/* 5717: read the CPMU status mirror out of SRAM and, if the
		 * signature matches, derive the function (0 or 1) from the
		 * 5717 function mask bit.
		 */
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
			tp->pci_fn = tp->pci_fn ? 1 : 0;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		/* 5719/5720: same SRAM signature check, but the function
		 * number is a multi-bit field extracted with the 5719
		 * mask/shift.
		 */
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
		}
	}
14324
14325         /* Get eeprom hw config before calling tg3_set_power_state().
14326          * In particular, the TG3_FLAG_IS_NIC flag must be
14327          * determined before calling tg3_set_power_state() so that
14328          * we know whether or not to switch out of Vaux power.
14329          * When the flag is set, it means that GPIO1 is used for eeprom
14330          * write protect and also implies that it is a LOM where GPIOs
14331          * are not used to switch power.
14332          */
14333         tg3_get_eeprom_hw_cfg(tp);
14334
14335         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14336                 tg3_flag_clear(tp, TSO_CAPABLE);
14337                 tg3_flag_clear(tp, TSO_BUG);
14338                 tp->fw_needed = NULL;
14339         }
14340
14341         if (tg3_flag(tp, ENABLE_APE)) {
14342                 /* Allow reads and writes to the
14343                  * APE register and memory space.
14344                  */
14345                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14346                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14347                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14348                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14349                                        pci_state_reg);
14350
14351                 tg3_ape_lock_init(tp);
14352         }
14353
14354         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14355             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14356             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14357             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14358             tg3_flag(tp, 57765_PLUS))
14359                 tg3_flag_set(tp, CPMU_PRESENT);
14360
14361         /* Set up tp->grc_local_ctrl before calling
14362          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14363          * will bring 5700's external PHY out of reset.
14364          * It is also used as eeprom write protect on LOMs.
14365          */
14366         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14367         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14368             tg3_flag(tp, EEPROM_WRITE_PROT))
14369                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14370                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14371         /* Unused GPIO3 must be driven as output on 5752 because there
14372          * are no pull-up resistors on unused GPIO pins.
14373          */
14374         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14375                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14376
14377         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14378             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14379             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14380                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14381
14382         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14383             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14384                 /* Turn off the debug UART. */
14385                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14386                 if (tg3_flag(tp, IS_NIC))
14387                         /* Keep VMain power. */
14388                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14389                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14390         }
14391
14392         /* Switch out of Vaux if it is a NIC */
14393         tg3_pwrsrc_switch_to_vmain(tp);
14394
14395         /* Derive initial jumbo mode from MTU assigned in
14396          * ether_setup() via the alloc_etherdev() call
14397          */
14398         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14399                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14400
14401         /* Determine WakeOnLan speed to use. */
14402         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14403             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14404             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14405             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14406                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14407         } else {
14408                 tg3_flag_set(tp, WOL_SPEED_100MB);
14409         }
14410
14411         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14412                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14413
14414         /* A few boards don't want Ethernet@WireSpeed phy feature */
14415         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14416             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14417              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14418              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14419             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14420             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14421                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14422
14423         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14424             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14425                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14426         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14427                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14428
14429         if (tg3_flag(tp, 5705_PLUS) &&
14430             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14431             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14432             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14433             !tg3_flag(tp, 57765_PLUS)) {
14434                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14435                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14436                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14437                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14438                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14439                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14440                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14441                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14442                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14443                 } else
14444                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14445         }
14446
14447         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14448             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14449                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14450                 if (tp->phy_otp == 0)
14451                         tp->phy_otp = TG3_OTP_DEFAULT;
14452         }
14453
14454         if (tg3_flag(tp, CPMU_PRESENT))
14455                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14456         else
14457                 tp->mi_mode = MAC_MI_MODE_BASE;
14458
14459         tp->coalesce_mode = 0;
14460         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14461             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14462                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14463
14464         /* Set these bits to enable statistics workaround. */
14465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14466             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14467             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14468                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14469                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14470         }
14471
14472         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14474                 tg3_flag_set(tp, USE_PHYLIB);
14475
14476         err = tg3_mdio_init(tp);
14477         if (err)
14478                 return err;
14479
14480         /* Initialize data/descriptor byte/word swapping. */
14481         val = tr32(GRC_MODE);
14482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14483                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14484                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14485                         GRC_MODE_B2HRX_ENABLE |
14486                         GRC_MODE_HTX2B_ENABLE |
14487                         GRC_MODE_HOST_STACKUP);
14488         else
14489                 val &= GRC_MODE_HOST_STACKUP;
14490
14491         tw32(GRC_MODE, val | tp->grc_mode);
14492
14493         tg3_switch_clocks(tp);
14494
14495         /* Clear this out for sanity. */
14496         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14497
14498         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14499                               &pci_state_reg);
14500         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14501             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14502                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14503
14504                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14505                     chiprevid == CHIPREV_ID_5701_B0 ||
14506                     chiprevid == CHIPREV_ID_5701_B2 ||
14507                     chiprevid == CHIPREV_ID_5701_B5) {
14508                         void __iomem *sram_base;
14509
14510                         /* Write some dummy words into the SRAM status block
14511                          * area, see if it reads back correctly.  If the return
14512                          * value is bad, force enable the PCIX workaround.
14513                          */
14514                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14515
14516                         writel(0x00000000, sram_base);
14517                         writel(0x00000000, sram_base + 4);
14518                         writel(0xffffffff, sram_base + 4);
14519                         if (readl(sram_base) != 0x00000000)
14520                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14521                 }
14522         }
14523
14524         udelay(50);
14525         tg3_nvram_init(tp);
14526
14527         grc_misc_cfg = tr32(GRC_MISC_CFG);
14528         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14529
14530         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14531             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14532              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14533                 tg3_flag_set(tp, IS_5788);
14534
14535         if (!tg3_flag(tp, IS_5788) &&
14536             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14537                 tg3_flag_set(tp, TAGGED_STATUS);
14538         if (tg3_flag(tp, TAGGED_STATUS)) {
14539                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14540                                       HOSTCC_MODE_CLRTICK_TXBD);
14541
14542                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14543                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14544                                        tp->misc_host_ctrl);
14545         }
14546
14547         /* Preserve the APE MAC_MODE bits */
14548         if (tg3_flag(tp, ENABLE_APE))
14549                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14550         else
14551                 tp->mac_mode = 0;
14552
14553         /* these are limited to 10/100 only */
14554         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14555              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14556             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14557              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14558              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14559               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14560               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14561             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14562              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14563               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14564               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14565             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14566             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14567             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14568             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14569                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14570
14571         err = tg3_phy_probe(tp);
14572         if (err) {
14573                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14574                 /* ... but do not return immediately ... */
14575                 tg3_mdio_fini(tp);
14576         }
14577
14578         tg3_read_vpd(tp);
14579         tg3_read_fw_ver(tp);
14580
14581         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14582                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14583         } else {
14584                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14585                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14586                 else
14587                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14588         }
14589
14590         /* 5700 {AX,BX} chips have a broken status block link
14591          * change bit implementation, so we must use the
14592          * status register in those cases.
14593          */
14594         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14595                 tg3_flag_set(tp, USE_LINKCHG_REG);
14596         else
14597                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14598
14599         /* The led_ctrl is set during tg3_phy_probe, here we might
14600          * have to force the link status polling mechanism based
14601          * upon subsystem IDs.
14602          */
14603         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14604             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14605             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14606                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14607                 tg3_flag_set(tp, USE_LINKCHG_REG);
14608         }
14609
14610         /* For all SERDES we poll the MAC status register. */
14611         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14612                 tg3_flag_set(tp, POLL_SERDES);
14613         else
14614                 tg3_flag_clear(tp, POLL_SERDES);
14615
14616         tp->rx_offset = NET_IP_ALIGN;
14617         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14618         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14619             tg3_flag(tp, PCIX_MODE)) {
14620                 tp->rx_offset = 0;
14621 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14622                 tp->rx_copy_thresh = ~(u16)0;
14623 #endif
14624         }
14625
14626         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14627         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14628         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14629
14630         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14631
14632         /* Increment the rx prod index on the rx std ring by at most
14633          * 8 for these chips to workaround hw errata.
14634          */
14635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14637             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14638                 tp->rx_std_max_post = 8;
14639
14640         if (tg3_flag(tp, ASPM_WORKAROUND))
14641                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14642                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14643
14644         return err;
14645 }
14646
14647 #ifdef CONFIG_SPARC
14648 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14649 {
14650         struct net_device *dev = tp->dev;
14651         struct pci_dev *pdev = tp->pdev;
14652         struct device_node *dp = pci_device_to_OF_node(pdev);
14653         const unsigned char *addr;
14654         int len;
14655
14656         addr = of_get_property(dp, "local-mac-address", &len);
14657         if (addr && len == 6) {
14658                 memcpy(dev->dev_addr, addr, 6);
14659                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14660                 return 0;
14661         }
14662         return -ENODEV;
14663 }
14664
14665 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14666 {
14667         struct net_device *dev = tp->dev;
14668
14669         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14670         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14671         return 0;
14672 }
14673 #endif
14674
/* Determine the hardware MAC address for this device, trying (in
 * order): Open Firmware (SPARC only), the bootcode mailbox in NIC
 * SRAM, NVRAM, and finally the MAC address registers themselves.
 * The permanent address (dev->perm_addr) is set from whatever source
 * succeeded.  Returns 0 on success, -EINVAL if no valid address was
 * found anywhere.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Pick the NVRAM offset that holds this function's MAC address. */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* The second MAC of a dual-MAC device lives at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): the lock/reset-or-unlock dance below
		 * presumably clears stale NVRAM arbitration state on
		 * these multi-function chips -- confirm.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* mailbox signature: ASCII "HK" */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* The first two bytes of the 8-byte NVRAM field
			 * are padding; the address starts at hi byte 2.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* Last resort on SPARC: the machine's IDPROM address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
14750
/* Per-architecture DMA burst boundary goals used by
 * tg3_calc_dma_bndry(): either stop DMA bursts at every cache line,
 * or allow them to span multiple cache lines.
 */
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2
14753
/* Merge the appropriate DMA read/write boundary bits into the
 * DMA_RWCTRL register value @val and return the result.  The boundary
 * limits how far a DMA burst may run before the chip breaks it, which
 * matters on host bridges that disconnect bursts crossing a cache
 * line.  The choice depends on bus type (PCI / PCI-X / PCIe), the
 * reported PCI cache line size, and a per-architecture policy.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI config space reports the cache line size in 32-bit words;
	 * 0 means "unset" and is treated as a 1024-byte line here.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Select the boundary policy for the host architecture. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;	/* no boundary restriction requested */
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		/* These chips expose only a single disable bit. */
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-X: both read and write boundary bits exist. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe: only the write-side boundary is controllable. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: pick the boundary matching the
		 * cache line size (or the next larger supported one).
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
14894
/* Run a single DMA transfer of @size bytes between the host buffer
 * @buf (bus address @buf_dma) and NIC SRAM at offset 0x2100.  A
 * non-zero @to_device uses the read DMA engine (host -> NIC); zero
 * uses the write DMA engine (NIC -> host).  The test descriptor is
 * written into the NIC's internal descriptor pool through the PCI
 * memory window, the transfer is started via the matching FTQ, and
 * the completion FIFO is polled for up to ~4ms (40 x 100us).
 * Returns 0 on completion, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear completion FIFOs and DMA engine status before the run. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host buffer and the fixed SRAM target (0x2100). */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO for our descriptor address. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
14974
/* Size of the host scratch buffer used by the DMA loopback test. */
#define TEST_BUFFER_SIZE	0x2000

/* Host bridges known to expose the write DMA bug even though the
 * loopback test passes; tg3_test_dma() forces the 16-byte write
 * boundary when one of these is present.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
14981
/* Establish tp->dma_rwctrl: program DMA watermark and boundary
 * settings appropriate for the detected bus type, then - on 5700/5701
 * only - run a host<->NIC DMA loopback test to detect the write DMA
 * hardware bug, tightening the write boundary to 16 bytes when
 * corruption is observed.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	/* Scratch buffer for the loopback transfers. */
	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		/* Conventional PCI watermarks. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X watermarks vary per chip. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the actual loopback test below. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Write a known pattern to the NIC, read it back, and verify;
	 * on a mismatch, tighten the write boundary and retry once.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First failure: apply the 16-byte write
				 * boundary workaround and rerun the test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				/* Still corrupt even with the workaround. */
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15171
15172 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15173 {
15174         if (tg3_flag(tp, 57765_PLUS)) {
15175                 tp->bufmgr_config.mbuf_read_dma_low_water =
15176                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15177                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15178                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15179                 tp->bufmgr_config.mbuf_high_water =
15180                         DEFAULT_MB_HIGH_WATER_57765;
15181
15182                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15183                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15184                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15185                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15186                 tp->bufmgr_config.mbuf_high_water_jumbo =
15187                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15188         } else if (tg3_flag(tp, 5705_PLUS)) {
15189                 tp->bufmgr_config.mbuf_read_dma_low_water =
15190                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15191                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15192                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15193                 tp->bufmgr_config.mbuf_high_water =
15194                         DEFAULT_MB_HIGH_WATER_5705;
15195                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15196                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15197                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15198                         tp->bufmgr_config.mbuf_high_water =
15199                                 DEFAULT_MB_HIGH_WATER_5906;
15200                 }
15201
15202                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15203                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15204                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15205                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15206                 tp->bufmgr_config.mbuf_high_water_jumbo =
15207                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15208         } else {
15209                 tp->bufmgr_config.mbuf_read_dma_low_water =
15210                         DEFAULT_MB_RDMA_LOW_WATER;
15211                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15212                         DEFAULT_MB_MACRX_LOW_WATER;
15213                 tp->bufmgr_config.mbuf_high_water =
15214                         DEFAULT_MB_HIGH_WATER;
15215
15216                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15217                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15218                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15219                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15220                 tp->bufmgr_config.mbuf_high_water_jumbo =
15221                         DEFAULT_MB_HIGH_WATER_JUMBO;
15222         }
15223
15224         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15225         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15226 }
15227
15228 static char * __devinit tg3_phy_string(struct tg3 *tp)
15229 {
15230         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15231         case TG3_PHY_ID_BCM5400:        return "5400";
15232         case TG3_PHY_ID_BCM5401:        return "5401";
15233         case TG3_PHY_ID_BCM5411:        return "5411";
15234         case TG3_PHY_ID_BCM5701:        return "5701";
15235         case TG3_PHY_ID_BCM5703:        return "5703";
15236         case TG3_PHY_ID_BCM5704:        return "5704";
15237         case TG3_PHY_ID_BCM5705:        return "5705";
15238         case TG3_PHY_ID_BCM5750:        return "5750";
15239         case TG3_PHY_ID_BCM5752:        return "5752";
15240         case TG3_PHY_ID_BCM5714:        return "5714";
15241         case TG3_PHY_ID_BCM5780:        return "5780";
15242         case TG3_PHY_ID_BCM5755:        return "5755";
15243         case TG3_PHY_ID_BCM5787:        return "5787";
15244         case TG3_PHY_ID_BCM5784:        return "5784";
15245         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15246         case TG3_PHY_ID_BCM5906:        return "5906";
15247         case TG3_PHY_ID_BCM5761:        return "5761";
15248         case TG3_PHY_ID_BCM5718C:       return "5718C";
15249         case TG3_PHY_ID_BCM5718S:       return "5718S";
15250         case TG3_PHY_ID_BCM57765:       return "57765";
15251         case TG3_PHY_ID_BCM5719C:       return "5719C";
15252         case TG3_PHY_ID_BCM5720C:       return "5720C";
15253         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15254         case 0:                 return "serdes";
15255         default:                return "unknown";
15256         }
15257 }
15258
15259 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15260 {
15261         if (tg3_flag(tp, PCI_EXPRESS)) {
15262                 strcpy(str, "PCI Express");
15263                 return str;
15264         } else if (tg3_flag(tp, PCIX_MODE)) {
15265                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15266
15267                 strcpy(str, "PCIX:");
15268
15269                 if ((clock_ctrl == 7) ||
15270                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15271                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15272                         strcat(str, "133MHz");
15273                 else if (clock_ctrl == 0)
15274                         strcat(str, "33MHz");
15275                 else if (clock_ctrl == 2)
15276                         strcat(str, "50MHz");
15277                 else if (clock_ctrl == 4)
15278                         strcat(str, "66MHz");
15279                 else if (clock_ctrl == 6)
15280                         strcat(str, "100MHz");
15281         } else {
15282                 strcpy(str, "PCI:");
15283                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15284                         strcat(str, "66MHz");
15285                 else
15286                         strcat(str, "33MHz");
15287         }
15288         if (tg3_flag(tp, PCI_32BIT))
15289                 strcat(str, ":32-bit");
15290         else
15291                 strcat(str, ":64-bit");
15292         return str;
15293 }
15294
/* Locate the other PCI function of a dual-port device.
 *
 * Scans all eight functions of this device's slot for a PCI device
 * other than tp->pdev.  Returns that peer, or tp->pdev itself when no
 * peer exists (e.g. a 5704 strapped for single-port operation), so
 * callers always receive a valid device pointer.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7; /* function 0 of our slot */

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;	/* found the partner; reference held for now */
		pci_dev_put(peer);	/* no-op when peer is NULL */
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15322
15323 static void __devinit tg3_init_coal(struct tg3 *tp)
15324 {
15325         struct ethtool_coalesce *ec = &tp->coal;
15326
15327         memset(ec, 0, sizeof(*ec));
15328         ec->cmd = ETHTOOL_GCOALESCE;
15329         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15330         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15331         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15332         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15333         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15334         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15335         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15336         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15337         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15338
15339         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15340                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15341                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15342                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15343                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15344                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15345         }
15346
15347         if (tg3_flag(tp, 5705_PLUS)) {
15348                 ec->rx_coalesce_usecs_irq = 0;
15349                 ec->tx_coalesce_usecs_irq = 0;
15350                 ec->stats_block_coalesce_usecs = 0;
15351         }
15352 }
15353
/* net_device callbacks for the tg3 driver; installed on the netdev in
 * tg3_init_one().
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
15371
/* tg3_init_one - PCI probe entry point.
 *
 * Brings the device to D0, maps its register BARs, reads the chip
 * invariants, picks DMA masks and offload features, programs the
 * per-vector interrupt mailboxes and finally registers the netdev.
 * Any failure unwinds through the goto labels at the bottom, which
 * release resources in reverse order of acquisition.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability.  The driver requires it; a
	 * device without one is rejected outright.
	 */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* These device IDs carry an APE whose register block lives in
	 * BAR 2; map it as well.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes.  Fall back to a 32-bit mask when a
	 * wider one cannot be honored.
	 */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Assign the per-vector interrupt/consumer/producer mailbox
	 * addresses.  The first five interrupt mailboxes are spaced 8
	 * bytes apart, the remainder 4 bytes apart.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		/* Send mailboxes alternate: -4 then +12 yields the
		 * hardware's producer-index register layout.
		 */
		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	/* Snapshot config space for later restore by error recovery. */
	pci_save_state(pdev);

	return 0;

	/* Error unwind: each label releases what was acquired before
	 * the corresponding failure point and falls through.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
15750
15751 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15752 {
15753         struct net_device *dev = pci_get_drvdata(pdev);
15754
15755         if (dev) {
15756                 struct tg3 *tp = netdev_priv(dev);
15757
15758                 if (tp->fw)
15759                         release_firmware(tp->fw);
15760
15761                 tg3_reset_task_cancel(tp);
15762
15763                 if (tg3_flag(tp, USE_PHYLIB)) {
15764                         tg3_phy_fini(tp);
15765                         tg3_mdio_fini(tp);
15766                 }
15767
15768                 unregister_netdev(dev);
15769                 if (tp->aperegs) {
15770                         iounmap(tp->aperegs);
15771                         tp->aperegs = NULL;
15772                 }
15773                 if (tp->regs) {
15774                         iounmap(tp->regs);
15775                         tp->regs = NULL;
15776                 }
15777                 free_netdev(dev);
15778                 pci_release_regions(pdev);
15779                 pci_disable_device(pdev);
15780                 pci_set_drvdata(pdev, NULL);
15781         }
15782 }
15783
15784 #ifdef CONFIG_PM_SLEEP
/* PM-sleep suspend hook: quiesce NAPI/PHY/timer, halt the chip and
 * prepare it for low power.  If tg3_power_down_prepare() fails, the
 * hardware is restarted and the interface re-attached so the device
 * stays usable; the original error is still returned.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down prep failed: restart the hardware and
		 * re-attach rather than suspending half-dead.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart must happen outside the full lock. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
15838
/* PM-sleep resume hook: re-attach the interface and restart the
 * hardware, timer and PHY.  Returns 0 when the interface was down or
 * the restart succeeded, otherwise the tg3_restart_hw() error.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
15871
/* Bundle suspend/resume into dev_pm_ops for the PCI core. */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

/* No CONFIG_PM_SLEEP: register no PM callbacks. */
#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
15880
15881 /**
15882  * tg3_io_error_detected - called when PCI error is detected
15883  * @pdev: Pointer to PCI device
15884  * @state: The current pci connection state
15885  *
15886  * This function is called after a PCI bus error affecting
15887  * this device has been detected.
15888  */
15889 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15890                                               pci_channel_state_t state)
15891 {
15892         struct net_device *netdev = pci_get_drvdata(pdev);
15893         struct tg3 *tp = netdev_priv(netdev);
15894         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15895
15896         netdev_info(netdev, "PCI I/O error detected\n");
15897
15898         rtnl_lock();
15899
15900         if (!netif_running(netdev))
15901                 goto done;
15902
15903         tg3_phy_stop(tp);
15904
15905         tg3_netif_stop(tp);
15906
15907         del_timer_sync(&tp->timer);
15908
15909         /* Want to make sure that the reset task doesn't run */
15910         tg3_reset_task_cancel(tp);
15911         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15912
15913         netif_device_detach(netdev);
15914
15915         /* Clean up software state, even if MMIO is blocked */
15916         tg3_full_lock(tp, 0);
15917         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15918         tg3_full_unlock(tp);
15919
15920 done:
15921         if (state == pci_channel_io_perm_failure)
15922                 err = PCI_ERS_RESULT_DISCONNECT;
15923         else
15924                 pci_disable_device(pdev);
15925
15926         rtnl_unlock();
15927
15928         return err;
15929 }
15930
15931 /**
15932  * tg3_io_slot_reset - called after the pci bus has been reset.
15933  * @pdev: Pointer to PCI device
15934  *
15935  * Restart the card from scratch, as if from a cold-boot.
15936  * At this point, the card has exprienced a hard reset,
15937  * followed by fixups by BIOS, and has its config space
15938  * set up identically to what it was at cold boot.
15939  */
15940 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15941 {
15942         struct net_device *netdev = pci_get_drvdata(pdev);
15943         struct tg3 *tp = netdev_priv(netdev);
15944         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15945         int err;
15946
15947         rtnl_lock();
15948
15949         if (pci_enable_device(pdev)) {
15950                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15951                 goto done;
15952         }
15953
15954         pci_set_master(pdev);
15955         pci_restore_state(pdev);
15956         pci_save_state(pdev);
15957
15958         if (!netif_running(netdev)) {
15959                 rc = PCI_ERS_RESULT_RECOVERED;
15960                 goto done;
15961         }
15962
15963         err = tg3_power_up(tp);
15964         if (err)
15965                 goto done;
15966
15967         rc = PCI_ERS_RESULT_RECOVERED;
15968
15969 done:
15970         rtnl_unlock();
15971
15972         return rc;
15973 }
15974
15975 /**
15976  * tg3_io_resume - called when traffic can start flowing again.
15977  * @pdev: Pointer to PCI device
15978  *
15979  * This callback is called when the error recovery driver tells
15980  * us that its OK to resume normal operation.
15981  */
15982 static void tg3_io_resume(struct pci_dev *pdev)
15983 {
15984         struct net_device *netdev = pci_get_drvdata(pdev);
15985         struct tg3 *tp = netdev_priv(netdev);
15986         int err;
15987
15988         rtnl_lock();
15989
15990         if (!netif_running(netdev))
15991                 goto done;
15992
15993         tg3_full_lock(tp, 0);
15994         tg3_flag_set(tp, INIT_COMPLETE);
15995         err = tg3_restart_hw(tp, 1);
15996         tg3_full_unlock(tp);
15997         if (err) {
15998                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15999                 goto done;
16000         }
16001
16002         netif_device_attach(netdev);
16003
16004         tp->timer.expires = jiffies + tp->timer_offset;
16005         add_timer(&tp->timer);
16006
16007         tg3_netif_start(tp);
16008
16009         tg3_phy_start(tp);
16010
16011 done:
16012         rtnl_unlock();
16013 }
16014
/* PCI error-recovery (AER) callbacks registered with the PCI core. */
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
16020
/* The tg3 PCI driver: probe/remove entry points, device ID table,
 * error-recovery handlers and power-management operations.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
16029
/* Module init: register the PCI driver with the core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
16034
/* Module exit: unregister the PCI driver, detaching all devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);