/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

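/* Example (editor's illustration, not driver code): the tg3_flag helpers
 * token-paste the short flag name onto the TG3_FLAG_ prefix, so
 *
 *      if (tg3_flag(tp, JUMBO_CAPABLE))
 *              tg3_flag_set(tp, TSO_CAPABLE);
 *
 * expands to test_bit(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags) and
 * set_bit(TG3_FLAG_TSO_CAPABLE, (tp)->tg3_flags) respectively, keeping
 * every flag check type-safe against the TG3_FLAGS enum in tg3.h.
 */
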
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     119
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 18, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

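/* Worked example (editor's note): because TG3_TX_RING_SIZE is a
 * compile-time power of two (512), the wrap in NEXT_TX() is a single
 * AND instead of a hardware modulo:
 *
 *      NEXT_TX(511) == (512 & 511) == 0
 *      NEXT_TX(5)   == (  6 & 511) == 6
 *
 * which is exactly the '% foo' -> '& (foo - 1)' rewrite that the
 * comment above describes.
 */
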
#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

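/* Sketch of how the threshold is consumed (editor's illustration; the
 * real rx path lives in tg3_rx() further down).  On architectures with
 * cheap unaligned loads, TG3_RX_COPY_THRESH(tp) folds to the constant
 * 256, so the compare needs no pointer chase through tp:
 *
 *      if (len > TG3_RX_COPY_THRESH(tp))
 *              ... hand the original DMA buffer to the stack ...
 *      else
 *              ... memcpy the frame into a small freshly-allocated skb ...
 *
 * On the other architectures the threshold comes from tp->rx_copy_thresh,
 * which is raised for the 5701-in-PCIX case so the copy also fixes up
 * the alignment bug described above.
 */
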
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

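/* Usage note (editor's example): tg3_debug is the standard netif_msg
 * bitmap, so a value can be composed from the NETIF_MSG_* bits used in
 * TG3_DEF_MSG_ENABLE above, e.g. loading the module with only probe
 * and link messages enabled:
 *
 *      modprobe tg3 tg3_debug=0x6      (NETIF_MSG_PROBE | NETIF_MSG_LINK)
 *
 * The default of -1 selects the TG3_DEF_MSG_ENABLE set.
 */
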
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

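/* Example (editor's illustration): callers that toggle GPIOs through
 * GRC_LOCAL_CTRL use the tw32_f()/tw32_wait_f() wrappers defined below
 * so the posted write is flushed and the settle time is honoured, e.g.
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL,
 *                  tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1,
 *                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * On PCIX_TARGET_HWBUG / ICH_WORKAROUND parts the same call degrades to
 * a non-posted indirect write followed by the delay.
 */
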
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

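/* Access pattern sketch (editor's note): both helpers above implement
 * the classic "window" idiom -- point TG3PCI_MEM_WIN_BASE_ADDR at the
 * SRAM offset, move one dword through TG3PCI_MEM_WIN_DATA, then park
 * the window back at zero.  A typical caller looks like:
 *
 *      u32 val;
 *
 *      tg3_read_mem(tp, NIC_SRAM_FW_CMD_MBOX, &val);
 *
 * Leaving the base parked at zero keeps a stray memory-window access
 * from scribbling on (or reading) an arbitrary SRAM location.
 */
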
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = 0; i < 8; i++) {
                if (i == TG3_APE_LOCK_GPIO)
                        continue;
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
        }

        /* Clear the correct bit of the GPIO lock too. */
        if (!tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_REQ_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

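/* Usage sketch (editor's example): the lock/unlock pair brackets any
 * access that the APE firmware might also be making, and the -EBUSY
 * path must be handled because the 1 ms grant wait can expire:
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return;                 (firmware still holds the lock)
 *      ... touch the shared resource ...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * Since tg3_ape_lock() returns 0 immediately when ENABLE_APE is clear,
 * callers can use the pair unconditionally.
 */
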
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

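/* Worked example (editor's note): a read of MII_BMSR on PHY address 1
 * builds the MI_COM frame from three fields --
 *
 *      frame_val = (1 << MI_COM_PHY_ADDR_SHIFT)            PHY address
 *                | (MII_BMSR << MI_COM_REG_ADDR_SHIFT)     register
 *                | MI_COM_CMD_READ | MI_COM_START;
 *
 * -- then polls MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations of
 * 10 us before extracting the 16 data bits with MI_COM_DATA_MASK.
 */
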
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

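/* Usage sketch (editor's example): the DSP registers are reached
 * indirectly -- write the target to MII_TG3_DSP_ADDRESS, then move the
 * data through MII_TG3_DSP_RW_PORT -- which is what the two wrappers
 * above hide.  Loading one DSP coefficient then reads as:
 *
 *      err = tg3_phydsp_write(tp, MII_TG3_DSP_TAP1,
 *                             MII_TG3_DSP_TAP1_AGCTGT_DFLT);
 *
 * (register/value names here are taken from tg3.h and shown for shape
 * only; the real callers appear later in this file).
 */
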
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

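/* Arithmetic check (editor's note): with TG3_FW_EVENT_TIMEOUT_USEC at
 * 2500, the worst case gives delay_cnt = (2500 >> 3) + 1 = 313 polls
 * of 8 us each, i.e. the loop re-checks GRC_RX_CPU_DRIVER_EVENT for at
 * most ~2.5 ms before giving up and letting the caller fire the next
 * event anyway.
 */
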
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}

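/* Resolution table (editor's summary of the code above, following the
 * IEEE 802.3 Annex 28B pause resolution): with PAUSE meaning the
 * ADVERTISE_1000XPAUSE / LPA_1000XPAUSE bit and ASYM the
 * ADVERTISE_1000XPSE_ASYM / LPA_1000XPAUSE_ASYM bit,
 *
 *      local           remote          resolved cap
 *      PAUSE           PAUSE           FLOW_CTRL_TX | FLOW_CTRL_RX
 *      PAUSE|ASYM      ASYM only       FLOW_CTRL_RX
 *      ASYM only       PAUSE|ASYM      FLOW_CTRL_TX
 *      anything else                   0
 */
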
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tg3_flag(tp, USE_PHYLIB))
                autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        spin_lock_bh(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else if (phydev->speed == SPEED_1000 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_MII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
                else
1566                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1567         }
1568
1569         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1570                 tw32(MAC_TX_LENGTHS,
1571                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1572                       (6 << TX_LENGTHS_IPG_SHIFT) |
1573                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1574         else
1575                 tw32(MAC_TX_LENGTHS,
1576                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1577                       (6 << TX_LENGTHS_IPG_SHIFT) |
1578                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1579
1580         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1581             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1582             phydev->speed != tp->link_config.active_speed ||
1583             phydev->duplex != tp->link_config.active_duplex ||
1584             oldflowctrl != tp->link_config.active_flowctrl)
1585                 linkmesg = 1;
1586
1587         tp->link_config.active_speed = phydev->speed;
1588         tp->link_config.active_duplex = phydev->duplex;
1589
1590         spin_unlock_bh(&tp->lock);
1591
1592         if (linkmesg)
1593                 tg3_link_report(tp);
1594 }
1595
1596 static int tg3_phy_init(struct tg3 *tp)
1597 {
1598         struct phy_device *phydev;
1599
1600         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1601                 return 0;
1602
1603         /* Bring the PHY back to a known state. */
1604         tg3_bmcr_reset(tp);
1605
1606         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1607
1608         /* Attach the MAC to the PHY. */
1609         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1610                              phydev->dev_flags, phydev->interface);
1611         if (IS_ERR(phydev)) {
1612                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1613                 return PTR_ERR(phydev);
1614         }
1615
1616         /* Mask with MAC supported features. */
1617         switch (phydev->interface) {
1618         case PHY_INTERFACE_MODE_GMII:
1619         case PHY_INTERFACE_MODE_RGMII:
1620                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1621                         phydev->supported &= (PHY_GBIT_FEATURES |
1622                                               SUPPORTED_Pause |
1623                                               SUPPORTED_Asym_Pause);
1624                         break;
1625                 }
1626                 /* fallthru */
1627         case PHY_INTERFACE_MODE_MII:
1628                 phydev->supported &= (PHY_BASIC_FEATURES |
1629                                       SUPPORTED_Pause |
1630                                       SUPPORTED_Asym_Pause);
1631                 break;
1632         default:
1633                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1634                 return -EINVAL;
1635         }
1636
1637         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1638
1639         phydev->advertising = phydev->supported;
1640
1641         return 0;
1642 }
1643
1644 static void tg3_phy_start(struct tg3 *tp)
1645 {
1646         struct phy_device *phydev;
1647
1648         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1649                 return;
1650
1651         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1652
1653         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1654                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1655                 phydev->speed = tp->link_config.orig_speed;
1656                 phydev->duplex = tp->link_config.orig_duplex;
1657                 phydev->autoneg = tp->link_config.orig_autoneg;
1658                 phydev->advertising = tp->link_config.orig_advertising;
1659         }
1660
1661         phy_start(phydev);
1662
1663         phy_start_aneg(phydev);
1664 }
1665
1666 static void tg3_phy_stop(struct tg3 *tp)
1667 {
1668         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1669                 return;
1670
1671         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1672 }
1673
1674 static void tg3_phy_fini(struct tg3 *tp)
1675 {
1676         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1677                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1678                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1679         }
1680 }
1681
1682 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1683 {
1684         u32 phytest;
1685
1686         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1687                 u32 phy;
1688
1689                 tg3_writephy(tp, MII_TG3_FET_TEST,
1690                              phytest | MII_TG3_FET_SHADOW_EN);
1691                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1692                         if (enable)
1693                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1694                         else
1695                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1696                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1697                 }
1698                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1699         }
1700 }
1701
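/* Toggle the PHY's auto power-down (APD) feature.  FET-style PHYs are
 * handled through the FET shadow register above; the rest take two
 * writes to the MISC shadow window: one selecting the SCR5 block for
 * the link/energy-detect bits, and one selecting the APD block to set
 * the enable bit and the 84 ms wake-up timer.
 */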
1702 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1703 {
1704         u32 reg;
1705
1706         if (!tg3_flag(tp, 5705_PLUS) ||
1707             (tg3_flag(tp, 5717_PLUS) &&
1708              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1709                 return;
1710
1711         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1712                 tg3_phy_fet_toggle_apd(tp, enable);
1713                 return;
1714         }
1715
1716         reg = MII_TG3_MISC_SHDW_WREN |
1717               MII_TG3_MISC_SHDW_SCR5_SEL |
1718               MII_TG3_MISC_SHDW_SCR5_LPED |
1719               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1720               MII_TG3_MISC_SHDW_SCR5_SDTL |
1721               MII_TG3_MISC_SHDW_SCR5_C125OE;
1722         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1723                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1724
1725         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1726
1727
1728         reg = MII_TG3_MISC_SHDW_WREN |
1729               MII_TG3_MISC_SHDW_APD_SEL |
1730               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1731         if (enable)
1732                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1733
1734         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1735 }
1736
1737 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1738 {
1739         u32 phy;
1740
1741         if (!tg3_flag(tp, 5705_PLUS) ||
1742             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1743                 return;
1744
1745         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1746                 u32 ephy;
1747
1748                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1749                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1750
1751                         tg3_writephy(tp, MII_TG3_FET_TEST,
1752                                      ephy | MII_TG3_FET_SHADOW_EN);
1753                         if (!tg3_readphy(tp, reg, &phy)) {
1754                                 if (enable)
1755                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1756                                 else
1757                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1758                                 tg3_writephy(tp, reg, phy);
1759                         }
1760                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1761                 }
1762         } else {
1763                 int ret;
1764
1765                 ret = tg3_phy_auxctl_read(tp,
1766                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1767                 if (!ret) {
1768                         if (enable)
1769                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1770                         else
1771                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1772                         tg3_phy_auxctl_write(tp,
1773                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1774                 }
1775         }
1776 }
1777
1778 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1779 {
1780         int ret;
1781         u32 val;
1782
1783         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1784                 return;
1785
1786         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1787         if (!ret)
1788                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1789                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1790 }
1791
1792 static void tg3_phy_apply_otp(struct tg3 *tp)
1793 {
1794         u32 otp, phy;
1795
1796         if (!tp->phy_otp)
1797                 return;
1798
1799         otp = tp->phy_otp;
1800
1801         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1802                 return;
1803
1804         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1805         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1806         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1807
1808         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1809               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1810         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1811
1812         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1813         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1814         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1815
1816         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1817         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1818
1819         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1820         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1821
1822         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1823               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1824         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1825
1826         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1827 }
1828
1829 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1830 {
1831         u32 val;
1832
1833         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1834                 return;
1835
1836         tp->setlpicnt = 0;
1837
1838         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1839             current_link_up == 1 &&
1840             tp->link_config.active_duplex == DUPLEX_FULL &&
1841             (tp->link_config.active_speed == SPEED_100 ||
1842              tp->link_config.active_speed == SPEED_1000)) {
1843                 u32 eeectl;
1844
1845                 if (tp->link_config.active_speed == SPEED_1000)
1846                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1847                 else
1848                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1849
1850                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1851
1852                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1853                                   TG3_CL45_D7_EEERES_STAT, &val);
1854
1855                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1856                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1857                         tp->setlpicnt = 2;
1858         }
1859
1860         if (!tp->setlpicnt) {
1861                 val = tr32(TG3_CPMU_EEE_MODE);
1862                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1863         }
1864 }
1865
1866 static void tg3_phy_eee_enable(struct tg3 *tp)
1867 {
1868         u32 val;
1869
1870         if (tp->link_config.active_speed == SPEED_1000 &&
1871             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1872              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1873              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1874             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1875                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1876                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1877         }
1878
1879         val = tr32(TG3_CPMU_EEE_MODE);
1880         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1881 }
1882
1883 static int tg3_wait_macro_done(struct tg3 *tp)
1884 {
1885         int limit = 100;
1886
1887         while (limit--) {
1888                 u32 tmp32;
1889
1890                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1891                         if ((tmp32 & 0x1000) == 0)
1892                                 break;
1893                 }
1894         }
1895         if (limit < 0)
1896                 return -EBUSY;
1897
1898         return 0;
1899 }
1900
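/* Write a known test pattern into each of the four DSP channels
 * (selected through MII_TG3_DSP_ADDRESS at chan * 0x2000 + 0x0200),
 * then read it back and compare.  Only the low 15 bits of the even
 * words and the low 4 bits of the odd words are significant.  A
 * readback mismatch fails with -EBUSY; if the DSP macro engine stalls,
 * *resetp is also set so the caller will reset the PHY and retry.
 */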
1901 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1902 {
1903         static const u32 test_pat[4][6] = {
1904         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1905         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1906         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1907         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1908         };
1909         int chan;
1910
1911         for (chan = 0; chan < 4; chan++) {
1912                 int i;
1913
1914                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1915                              (chan * 0x2000) | 0x0200);
1916                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1917
1918                 for (i = 0; i < 6; i++)
1919                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1920                                      test_pat[chan][i]);
1921
1922                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1923                 if (tg3_wait_macro_done(tp)) {
1924                         *resetp = 1;
1925                         return -EBUSY;
1926                 }
1927
1928                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1929                              (chan * 0x2000) | 0x0200);
1930                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1931                 if (tg3_wait_macro_done(tp)) {
1932                         *resetp = 1;
1933                         return -EBUSY;
1934                 }
1935
1936                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1937                 if (tg3_wait_macro_done(tp)) {
1938                         *resetp = 1;
1939                         return -EBUSY;
1940                 }
1941
1942                 for (i = 0; i < 6; i += 2) {
1943                         u32 low, high;
1944
1945                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1946                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1947                             tg3_wait_macro_done(tp)) {
1948                                 *resetp = 1;
1949                                 return -EBUSY;
1950                         }
1951                         low &= 0x7fff;
1952                         high &= 0x000f;
1953                         if (low != test_pat[chan][i] ||
1954                             high != test_pat[chan][i+1]) {
1955                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1956                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1957                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1958
1959                                 return -EBUSY;
1960                         }
1961                 }
1962         }
1963
1964         return 0;
1965 }
1966
1967 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1968 {
1969         int chan;
1970
1971         for (chan = 0; chan < 4; chan++) {
1972                 int i;
1973
1974                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1975                              (chan * 0x2000) | 0x0200);
1976                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1977                 for (i = 0; i < 6; i++)
1978                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1979                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1980                 if (tg3_wait_macro_done(tp))
1981                         return -EBUSY;
1982         }
1983
1984         return 0;
1985 }
1986
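/* DSP recovery sequence for 5703/5704/5705 PHYs: force 1000 Mbps full
 * duplex in master mode, then rewrite and verify the channel test
 * patterns, resetting the PHY and retrying (up to 10 times) if
 * verification fails, before restoring the original CTRL1000 and
 * EXT_CTRL settings.
 */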
1987 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1988 {
1989         u32 reg32, phy9_orig;
1990         int retries, do_phy_reset, err;
1991
1992         retries = 10;
1993         do_phy_reset = 1;
1994         do {
1995                 if (do_phy_reset) {
1996                         err = tg3_bmcr_reset(tp);
1997                         if (err)
1998                                 return err;
1999                         do_phy_reset = 0;
2000                 }
2001
2002                 /* Disable transmitter and interrupt.  */
2003                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2004                         continue;
2005
2006                 reg32 |= 0x3000;
2007                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2008
2009                 /* Set full-duplex, 1000 mbps.  */
2010                 /* Set full-duplex, 1000 Mbps.  */
2011                              BMCR_FULLDPLX | BMCR_SPEED1000);
2012
2013                 /* Set to master mode.  */
2014                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2015                         continue;
2016
2017                 tg3_writephy(tp, MII_CTRL1000,
2018                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2019
2020                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2021                 if (err)
2022                         return err;
2023
2024                 /* Block the PHY control access.  */
2025                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2026
2027                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2028                 if (!err)
2029                         break;
2030         } while (--retries);
2031
2032         err = tg3_phy_reset_chanpat(tp);
2033         if (err)
2034                 return err;
2035
2036         tg3_phydsp_write(tp, 0x8005, 0x0000);
2037
2038         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2039         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2040
2041         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2042
2043         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2044
2045         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2046                 reg32 &= ~0x3000;
2047                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2048         } else if (!err)
2049                 err = -EBUSY;
2050
2051         return err;
2052 }
2053
2054 /* Reset the tigon3 PHY and reapply the chip-specific
2055  * PHY workarounds.
2056  */
2057 static int tg3_phy_reset(struct tg3 *tp)
2058 {
2059         u32 val, cpmuctrl;
2060         int err;
2061
2062         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2063                 val = tr32(GRC_MISC_CFG);
2064                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2065                 udelay(40);
2066         }
2067         err  = tg3_readphy(tp, MII_BMSR, &val);
2068         err |= tg3_readphy(tp, MII_BMSR, &val);
2069         if (err != 0)
2070                 return -EBUSY;
2071
2072         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2073                 netif_carrier_off(tp->dev);
2074                 tg3_link_report(tp);
2075         }
2076
2077         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2078             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2079             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2080                 err = tg3_phy_reset_5703_4_5(tp);
2081                 if (err)
2082                         return err;
2083                 goto out;
2084         }
2085
2086         cpmuctrl = 0;
2087         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2088             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2089                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2090                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2091                         tw32(TG3_CPMU_CTRL,
2092                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2093         }
2094
2095         err = tg3_bmcr_reset(tp);
2096         if (err)
2097                 return err;
2098
2099         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2100                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2101                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2102
2103                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2104         }
2105
2106         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2107             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2108                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2109                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2110                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2111                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2112                         udelay(40);
2113                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2114                 }
2115         }
2116
2117         if (tg3_flag(tp, 5717_PLUS) &&
2118             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2119                 return 0;
2120
2121         tg3_phy_apply_otp(tp);
2122
2123         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2124                 tg3_phy_toggle_apd(tp, true);
2125         else
2126                 tg3_phy_toggle_apd(tp, false);
2127
2128 out:
2129         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2130             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2131                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2132                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2133                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2134         }
2135
2136         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2137                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2138                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2139         }
2140
2141         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2142                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2143                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2144                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2145                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2146                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2147                 }
2148         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2149                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2150                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2151                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2152                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2153                                 tg3_writephy(tp, MII_TG3_TEST1,
2154                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2155                         } else
2156                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2157
2158                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2159                 }
2160         }
2161
2162         /* Set the extended packet length bit (bit 14) on all
2163          * chips that support jumbo frames. */
2164         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2165                 /* Cannot do read-modify-write on 5401 */
2166                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2167         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2168                 /* Set bit 14 with read-modify-write to preserve other bits */
2169                 err = tg3_phy_auxctl_read(tp,
2170                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2171                 if (!err)
2172                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2173                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2174         }
2175
2176         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2177          * jumbo frame transmission.
2178          */
2179         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2180                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2181                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2182                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2183         }
2184
2185         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2186                 /* adjust output voltage */
2187                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2188         }
2189
2190         tg3_phy_toggle_automdix(tp, 1);
2191         tg3_phy_set_wirespeed(tp);
2192         return 0;
2193 }
2194
2195 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2196 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2197 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2198                                           TG3_GPIO_MSG_NEED_VAUX)
2199 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2200         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2201          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2202          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2203          (TG3_GPIO_MSG_DRVR_PRES << 12))
2204
2205 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2206         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2207          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2208          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2209          (TG3_GPIO_MSG_NEED_VAUX << 12))
2210
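/* Each PCI function owns a two-bit status field (DRVR_PRES and
 * NEED_VAUX) packed at a four-bit stride, so function N's bits sit at
 * TG3_APE_GPIO_MSG_SHIFT + 4 * N; the ALL_*_MASK macros above cover
 * the corresponding bit in nibbles 0, 4, 8 and 12.  The helper below
 * replaces only this function's field and returns the combined status
 * of every function, shifted back down.
 */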
2211 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2212 {
2213         u32 status, shift;
2214
2215         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2216             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2217                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2218         else
2219                 status = tr32(TG3_CPMU_DRV_STATUS);
2220
2221         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2222         status &= ~(TG3_GPIO_MSG_MASK << shift);
2223         status |= (newstat << shift);
2224
2225         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2226             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2227                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2228         else
2229                 tw32(TG3_CPMU_DRV_STATUS, status);
2230
2231         return status >> TG3_APE_GPIO_MSG_SHIFT;
2232 }
2233
2234 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2235 {
2236         if (!tg3_flag(tp, IS_NIC))
2237                 return 0;
2238
2239         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2240             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2241             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2242                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2243                         return -EIO;
2244
2245                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2246
2247                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2248                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2249
2250                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2251         } else {
2252                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2253                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2254         }
2255
2256         return 0;
2257 }
2258
2259 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2260 {
2261         u32 grc_local_ctrl;
2262
2263         if (!tg3_flag(tp, IS_NIC) ||
2264             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2265             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2266                 return;
2267
2268         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2269
2270         tw32_wait_f(GRC_LOCAL_CTRL,
2271                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2272                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2273
2274         tw32_wait_f(GRC_LOCAL_CTRL,
2275                     grc_local_ctrl,
2276                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2277
2278         tw32_wait_f(GRC_LOCAL_CTRL,
2279                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2281 }
2282
2283 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2284 {
2285         if (!tg3_flag(tp, IS_NIC))
2286                 return;
2287
2288         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2289             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2290                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2291                             (GRC_LCLCTRL_GPIO_OE0 |
2292                              GRC_LCLCTRL_GPIO_OE1 |
2293                              GRC_LCLCTRL_GPIO_OE2 |
2294                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2295                              GRC_LCLCTRL_GPIO_OUTPUT1),
2296                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2297         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2298                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2299                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2300                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2301                                      GRC_LCLCTRL_GPIO_OE1 |
2302                                      GRC_LCLCTRL_GPIO_OE2 |
2303                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2304                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2305                                      tp->grc_local_ctrl;
2306                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2307                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2308
2309                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2310                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2311                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2312
2313                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2314                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2315                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2316         } else {
2317                 u32 no_gpio2;
2318                 u32 grc_local_ctrl = 0;
2319
2320                 /* Workaround to prevent drawing too much current. */
2321                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2322                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2323                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2324                                     grc_local_ctrl,
2325                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2326                 }
2327
2328                 /* On 5753 and variants, GPIO2 cannot be used. */
2329                 no_gpio2 = tp->nic_sram_data_cfg &
2330                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2331
2332                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2333                                   GRC_LCLCTRL_GPIO_OE1 |
2334                                   GRC_LCLCTRL_GPIO_OE2 |
2335                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2336                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2337                 if (no_gpio2) {
2338                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2339                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2340                 }
2341                 tw32_wait_f(GRC_LOCAL_CTRL,
2342                             tp->grc_local_ctrl | grc_local_ctrl,
2343                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2344
2345                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2346
2347                 tw32_wait_f(GRC_LOCAL_CTRL,
2348                             tp->grc_local_ctrl | grc_local_ctrl,
2349                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2350
2351                 if (!no_gpio2) {
2352                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2353                         tw32_wait_f(GRC_LOCAL_CTRL,
2354                                     tp->grc_local_ctrl | grc_local_ctrl,
2355                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2356                 }
2357         }
2358 }
2359
2360 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2361 {
2362         u32 msg = 0;
2363
2364         /* Serialize power state transitions */
2365         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2366                 return;
2367
2368         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2369                 msg = TG3_GPIO_MSG_NEED_VAUX;
2370
2371         msg = tg3_set_function_status(tp, msg);
2372
2373         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2374                 goto done;
2375
2376         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2377                 tg3_pwrsrc_switch_to_vaux(tp);
2378         else
2379                 tg3_pwrsrc_die_with_vmain(tp);
2380
2381 done:
2382         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2383 }
2384
2385 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2386 {
2387         bool need_vaux = false;
2388
2389         /* The GPIOs do something completely different on 57765. */
2390         if (!tg3_flag(tp, IS_NIC) ||
2391             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2392                 return;
2393
2394         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2395             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2396             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2397                 tg3_frob_aux_power_5717(tp, include_wol ?
2398                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2399                 return;
2400         }
2401
2402         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2403                 struct net_device *dev_peer;
2404
2405                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2406
2407                 /* remove_one() may have been run on the peer. */
2408                 if (dev_peer) {
2409                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2410
2411                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2412                                 return;
2413
2414                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2415                             tg3_flag(tp_peer, ENABLE_ASF))
2416                                 need_vaux = true;
2417                 }
2418         }
2419
2420         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2421             tg3_flag(tp, ENABLE_ASF))
2422                 need_vaux = true;
2423
2424         if (need_vaux)
2425                 tg3_pwrsrc_switch_to_vaux(tp);
2426         else
2427                 tg3_pwrsrc_die_with_vmain(tp);
2428 }
2429
2430 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2431 {
2432         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2433                 return 1;
2434         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2435                 if (speed != SPEED_10)
2436                         return 1;
2437         } else if (speed == SPEED_10)
2438                 return 1;
2439
2440         return 0;
2441 }
2442
2443 static int tg3_setup_phy(struct tg3 *, int);
2444
2445 #define RESET_KIND_SHUTDOWN     0
2446 #define RESET_KIND_INIT         1
2447 #define RESET_KIND_SUSPEND      2
2448
2449 static void tg3_write_sig_post_reset(struct tg3 *, int);
2450 static int tg3_halt_cpu(struct tg3 *, u32);
2451
2452 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2453 {
2454         u32 val;
2455
2456         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2457                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2458                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2459                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2460
2461                         sg_dig_ctrl |=
2462                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2463                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2464                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2465                 }
2466                 return;
2467         }
2468
2469         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2470                 tg3_bmcr_reset(tp);
2471                 val = tr32(GRC_MISC_CFG);
2472                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2473                 udelay(40);
2474                 return;
2475         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2476                 u32 phytest;
2477                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2478                         u32 phy;
2479
2480                         tg3_writephy(tp, MII_ADVERTISE, 0);
2481                         tg3_writephy(tp, MII_BMCR,
2482                                      BMCR_ANENABLE | BMCR_ANRESTART);
2483
2484                         tg3_writephy(tp, MII_TG3_FET_TEST,
2485                                      phytest | MII_TG3_FET_SHADOW_EN);
2486                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2487                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2488                                 tg3_writephy(tp,
2489                                              MII_TG3_FET_SHDW_AUXMODE4,
2490                                              phy);
2491                         }
2492                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2493                 }
2494                 return;
2495         } else if (do_low_power) {
2496                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2497                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2498
2499                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2500                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2501                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2502                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2503         }
2504
2505         /* The PHY should not be powered down on some chips because
2506          * of bugs.
2507          */
2508         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2509             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2510             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2511              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2512                 return;
2513
2514         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2515             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2516                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2517                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2518                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2519                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2520         }
2521
2522         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2523 }
2524
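/* Acquire the NVRAM software arbitration semaphore.  The request is
 * posted through NVRAM_SWARB and the grant bit is polled for up to
 * 8000 * 20 usec (160 ms).  Nesting is tracked in nvram_lock_cnt so
 * the hardware semaphore is only released once the count drops back
 * to zero in tg3_nvram_unlock().
 */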
2525 /* tp->lock is held. */
2526 static int tg3_nvram_lock(struct tg3 *tp)
2527 {
2528         if (tg3_flag(tp, NVRAM)) {
2529                 int i;
2530
2531                 if (tp->nvram_lock_cnt == 0) {
2532                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2533                         for (i = 0; i < 8000; i++) {
2534                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2535                                         break;
2536                                 udelay(20);
2537                         }
2538                         if (i == 8000) {
2539                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2540                                 return -ENODEV;
2541                         }
2542                 }
2543                 tp->nvram_lock_cnt++;
2544         }
2545         return 0;
2546 }
2547
2548 /* tp->lock is held. */
2549 static void tg3_nvram_unlock(struct tg3 *tp)
2550 {
2551         if (tg3_flag(tp, NVRAM)) {
2552                 if (tp->nvram_lock_cnt > 0)
2553                         tp->nvram_lock_cnt--;
2554                 if (tp->nvram_lock_cnt == 0)
2555                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2556         }
2557 }
2558
2559 /* tp->lock is held. */
2560 static void tg3_enable_nvram_access(struct tg3 *tp)
2561 {
2562         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2563                 u32 nvaccess = tr32(NVRAM_ACCESS);
2564
2565                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2566         }
2567 }
2568
2569 /* tp->lock is held. */
2570 static void tg3_disable_nvram_access(struct tg3 *tp)
2571 {
2572         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2573                 u32 nvaccess = tr32(NVRAM_ACCESS);
2574
2575                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2576         }
2577 }
2578
2579 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2580                                         u32 offset, u32 *val)
2581 {
2582         u32 tmp;
2583         int i;
2584
2585         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2586                 return -EINVAL;
2587
2588         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2589                                         EEPROM_ADDR_DEVID_MASK |
2590                                         EEPROM_ADDR_READ);
2591         tw32(GRC_EEPROM_ADDR,
2592              tmp |
2593              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2594              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2595               EEPROM_ADDR_ADDR_MASK) |
2596              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2597
2598         for (i = 0; i < 1000; i++) {
2599                 tmp = tr32(GRC_EEPROM_ADDR);
2600
2601                 if (tmp & EEPROM_ADDR_COMPLETE)
2602                         break;
2603                 msleep(1);
2604         }
2605         if (!(tmp & EEPROM_ADDR_COMPLETE))
2606                 return -EBUSY;
2607
2608         tmp = tr32(GRC_EEPROM_DATA);
2609
2610         /*
2611          * The data will always be opposite the native endian
2612          * format.  Perform a blind byteswap to compensate.
2613          */
2614         *val = swab32(tmp);
2615
2616         return 0;
2617 }
2618
2619 #define NVRAM_CMD_TIMEOUT 10000
2620
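/* Issue a command to the NVRAM engine and poll NVRAM_CMD_DONE in
 * 10 usec steps; at NVRAM_CMD_TIMEOUT (10000) iterations this bounds
 * the wait at roughly 100 ms before giving up with -EBUSY.
 */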
2621 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2622 {
2623         int i;
2624
2625         tw32(NVRAM_CMD, nvram_cmd);
2626         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2627                 udelay(10);
2628                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2629                         udelay(10);
2630                         break;
2631                 }
2632         }
2633
2634         if (i == NVRAM_CMD_TIMEOUT)
2635                 return -EBUSY;
2636
2637         return 0;
2638 }
2639
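/* Translate a flat NVRAM offset into the Atmel AT45DB0x1B page/byte
 * form, where the page number sits above ATMEL_AT45DB0X1B_PAGE_POS
 * and the byte offset within the page sits below it.  Illustrative
 * arithmetic, assuming the usual 264-byte page and a page position
 * of 9: offset 1000 is page 3, byte 208, i.e. (3 << 9) + 208 = 0x6d0.
 * tg3_nvram_logical_addr() below is the inverse mapping.
 */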
2640 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2641 {
2642         if (tg3_flag(tp, NVRAM) &&
2643             tg3_flag(tp, NVRAM_BUFFERED) &&
2644             tg3_flag(tp, FLASH) &&
2645             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2646             (tp->nvram_jedecnum == JEDEC_ATMEL))
2647
2648                 addr = ((addr / tp->nvram_pagesize) <<
2649                         ATMEL_AT45DB0X1B_PAGE_POS) +
2650                        (addr % tp->nvram_pagesize);
2651
2652         return addr;
2653 }
2654
2655 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2656 {
2657         if (tg3_flag(tp, NVRAM) &&
2658             tg3_flag(tp, NVRAM_BUFFERED) &&
2659             tg3_flag(tp, FLASH) &&
2660             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2661             (tp->nvram_jedecnum == JEDEC_ATMEL))
2662
2663                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2664                         tp->nvram_pagesize) +
2665                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2666
2667         return addr;
2668 }
2669
2670 /* NOTE: Data read in from NVRAM is byteswapped according to
2671  * the byteswapping settings for all other register accesses.
2672  * tg3 devices are BE devices, so on a BE machine, the data
2673  * returned will be exactly as it is seen in NVRAM.  On a LE
2674  * machine, the 32-bit value will be byteswapped.
2675  */
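/* For example, if the four bytes at the target offset are 0x12 0x34
 * 0x56 0x78, both hosts see *val == 0x12345678; only the BE host's
 * in-memory byte order matches NVRAM.  Use tg3_nvram_read_be32()
 * below whenever a byte stream is required.
 */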
2676 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2677 {
2678         int ret;
2679
2680         if (!tg3_flag(tp, NVRAM))
2681                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2682
2683         offset = tg3_nvram_phys_addr(tp, offset);
2684
2685         if (offset > NVRAM_ADDR_MSK)
2686                 return -EINVAL;
2687
2688         ret = tg3_nvram_lock(tp);
2689         if (ret)
2690                 return ret;
2691
2692         tg3_enable_nvram_access(tp);
2693
2694         tw32(NVRAM_ADDR, offset);
2695         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2696                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2697
2698         if (ret == 0)
2699                 *val = tr32(NVRAM_RDDATA);
2700
2701         tg3_disable_nvram_access(tp);
2702
2703         tg3_nvram_unlock(tp);
2704
2705         return ret;
2706 }
2707
2708 /* Ensures NVRAM data is in bytestream format. */
2709 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2710 {
2711         u32 v;
2712         int res = tg3_nvram_read(tp, offset, &v);
2713         if (!res)
2714                 *val = cpu_to_be32(v);
2715         return res;
2716 }
2717
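/* Program the station address into the MAC address registers: the
 * first two octets go in the HIGH register and the last four in LOW,
 * so e.g. 00:10:18:aa:bb:cc is written as addr_high = 0x0010 and
 * addr_low = 0x18aabbcc.  All four address slots get the same value
 * (slot 1 optionally skipped), 5703/5704 also mirror it into twelve
 * extended slots, and the octet sum seeds the TX backoff generator.
 */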
2718 /* tp->lock is held. */
2719 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2720 {
2721         u32 addr_high, addr_low;
2722         int i;
2723
2724         addr_high = ((tp->dev->dev_addr[0] << 8) |
2725                      tp->dev->dev_addr[1]);
2726         addr_low = ((tp->dev->dev_addr[2] << 24) |
2727                     (tp->dev->dev_addr[3] << 16) |
2728                     (tp->dev->dev_addr[4] <<  8) |
2729                     (tp->dev->dev_addr[5] <<  0));
2730         for (i = 0; i < 4; i++) {
2731                 if (i == 1 && skip_mac_1)
2732                         continue;
2733                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2734                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2735         }
2736
2737         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2738             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2739                 for (i = 0; i < 12; i++) {
2740                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2741                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2742                 }
2743         }
2744
2745         addr_high = (tp->dev->dev_addr[0] +
2746                      tp->dev->dev_addr[1] +
2747                      tp->dev->dev_addr[2] +
2748                      tp->dev->dev_addr[3] +
2749                      tp->dev->dev_addr[4] +
2750                      tp->dev->dev_addr[5]) &
2751                 TX_BACKOFF_SEED_MASK;
2752         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2753 }
2754
2755 static void tg3_enable_register_access(struct tg3 *tp)
2756 {
2757         /*
2758          * Make sure register accesses (indirect or otherwise) will function
2759          * correctly.
2760          */
2761         pci_write_config_dword(tp->pdev,
2762                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2763 }
2764
2765 static int tg3_power_up(struct tg3 *tp)
2766 {
2767         int err;
2768
2769         tg3_enable_register_access(tp);
2770
2771         err = pci_set_power_state(tp->pdev, PCI_D0);
2772         if (!err) {
2773                 /* Switch out of Vaux if it is a NIC */
2774                 tg3_pwrsrc_switch_to_vmain(tp);
2775         } else {
2776                 netdev_err(tp->dev, "Transition to D0 failed\n");
2777         }
2778
2779         return err;
2780 }
2781
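/* Quiesce the chip ahead of a power-down: mask PCI interrupts, save
 * the current link configuration, drop the link to a low, WOL-capable
 * speed when waking is possible, and arm the magic-packet (and, with
 * ASF/APE, keep-frame) MAC modes before the caller cuts power.
 */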
2782 static int tg3_power_down_prepare(struct tg3 *tp)
2783 {
2784         u32 misc_host_ctrl;
2785         bool device_should_wake, do_low_power;
2786
2787         tg3_enable_register_access(tp);
2788
2789         /* Restore the CLKREQ setting. */
2790         if (tg3_flag(tp, CLKREQ_BUG)) {
2791                 u16 lnkctl;
2792
2793                 pci_read_config_word(tp->pdev,
2794                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2795                                      &lnkctl);
2796                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2797                 pci_write_config_word(tp->pdev,
2798                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2799                                       lnkctl);
2800         }
2801
2802         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2803         tw32(TG3PCI_MISC_HOST_CTRL,
2804              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2805
2806         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2807                              tg3_flag(tp, WOL_ENABLE);
2808
2809         if (tg3_flag(tp, USE_PHYLIB)) {
2810                 do_low_power = false;
2811                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2812                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2813                         struct phy_device *phydev;
2814                         u32 phyid, advertising;
2815
2816                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2817
2818                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2819
2820                         tp->link_config.orig_speed = phydev->speed;
2821                         tp->link_config.orig_duplex = phydev->duplex;
2822                         tp->link_config.orig_autoneg = phydev->autoneg;
2823                         tp->link_config.orig_advertising = phydev->advertising;
2824
2825                         advertising = ADVERTISED_TP |
2826                                       ADVERTISED_Pause |
2827                                       ADVERTISED_Autoneg |
2828                                       ADVERTISED_10baseT_Half;
2829
2830                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2831                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2832                                         advertising |=
2833                                                 ADVERTISED_100baseT_Half |
2834                                                 ADVERTISED_100baseT_Full |
2835                                                 ADVERTISED_10baseT_Full;
2836                                 else
2837                                         advertising |= ADVERTISED_10baseT_Full;
2838                         }
2839
2840                         phydev->advertising = advertising;
2841
2842                         phy_start_aneg(phydev);
2843
2844                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2845                         if (phyid != PHY_ID_BCMAC131) {
2846                                 phyid &= PHY_BCM_OUI_MASK;
2847                                 if (phyid == PHY_BCM_OUI_1 ||
2848                                     phyid == PHY_BCM_OUI_2 ||
2849                                     phyid == PHY_BCM_OUI_3)
2850                                         do_low_power = true;
2851                         }
2852                 }
2853         } else {
2854                 do_low_power = true;
2855
2856                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2857                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2858                         tp->link_config.orig_speed = tp->link_config.speed;
2859                         tp->link_config.orig_duplex = tp->link_config.duplex;
2860                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2861                 }
2862
2863                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2864                         tp->link_config.speed = SPEED_10;
2865                         tp->link_config.duplex = DUPLEX_HALF;
2866                         tp->link_config.autoneg = AUTONEG_ENABLE;
2867                         tg3_setup_phy(tp, 0);
2868                 }
2869         }
2870
2871         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2872                 u32 val;
2873
2874                 val = tr32(GRC_VCPU_EXT_CTRL);
2875                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2876         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2877                 int i;
2878                 u32 val;
2879
2880                 for (i = 0; i < 200; i++) {
2881                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2882                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2883                                 break;
2884                         msleep(1);
2885                 }
2886         }
2887         if (tg3_flag(tp, WOL_CAP))
2888                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2889                                                      WOL_DRV_STATE_SHUTDOWN |
2890                                                      WOL_DRV_WOL |
2891                                                      WOL_SET_MAGIC_PKT);
2892
2893         if (device_should_wake) {
2894                 u32 mac_mode;
2895
2896                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2897                         if (do_low_power &&
2898                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2899                                 tg3_phy_auxctl_write(tp,
2900                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2901                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2902                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2903                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2904                                 udelay(40);
2905                         }
2906
2907                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2908                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2909                         else
2910                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2911
2912                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2913                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2914                             ASIC_REV_5700) {
2915                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2916                                              SPEED_100 : SPEED_10;
2917                                 if (tg3_5700_link_polarity(tp, speed))
2918                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2919                                 else
2920                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2921                         }
2922                 } else {
2923                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2924                 }
2925
2926                 if (!tg3_flag(tp, 5750_PLUS))
2927                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2928
2929                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2930                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2931                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2932                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2933
2934                 if (tg3_flag(tp, ENABLE_APE))
2935                         mac_mode |= MAC_MODE_APE_TX_EN |
2936                                     MAC_MODE_APE_RX_EN |
2937                                     MAC_MODE_TDE_ENABLE;
2938
2939                 tw32_f(MAC_MODE, mac_mode);
2940                 udelay(100);
2941
2942                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2943                 udelay(10);
2944         }
2945
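             /* Gate the RX/TX (and on some chips the core) clocks to
              * cut power while the device is down.  5780-class,
              * CPMU-equipped and 5906 chips are deliberately left
              * alone.
              */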
2946         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2947             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2948              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2949                 u32 base_val;
2950
2951                 base_val = tp->pci_clock_ctrl;
2952                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2953                              CLOCK_CTRL_TXCLK_DISABLE);
2954
2955                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2956                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2957         } else if (tg3_flag(tp, 5780_CLASS) ||
2958                    tg3_flag(tp, CPMU_PRESENT) ||
2959                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2960                 /* do nothing */
2961         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2962                 u32 newbits1, newbits2;
2963
2964                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2965                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2966                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2967                                     CLOCK_CTRL_TXCLK_DISABLE |
2968                                     CLOCK_CTRL_ALTCLK);
2969                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2970                 } else if (tg3_flag(tp, 5705_PLUS)) {
2971                         newbits1 = CLOCK_CTRL_625_CORE;
2972                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2973                 } else {
2974                         newbits1 = CLOCK_CTRL_ALTCLK;
2975                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2976                 }
2977
2978                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2979                             40);
2980
2981                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2982                             40);
2983
2984                 if (!tg3_flag(tp, 5705_PLUS)) {
2985                         u32 newbits3;
2986
2987                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2988                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2989                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2990                                             CLOCK_CTRL_TXCLK_DISABLE |
2991                                             CLOCK_CTRL_44MHZ_CORE);
2992                         } else {
2993                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2994                         }
2995
2996                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2997                                     tp->pci_clock_ctrl | newbits3, 40);
2998                 }
2999         }
3000
3001         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3002                 tg3_power_down_phy(tp, do_low_power);
3003
3004         tg3_frob_aux_power(tp, true);
3005
3006         /* Workaround for unstable PLL clock */
3007         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3008             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3009                 u32 val = tr32(0x7d00);
3010
3011                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3012                 tw32(0x7d00, val);
3013                 if (!tg3_flag(tp, ENABLE_ASF)) {
3014                         int err;
3015
3016                         err = tg3_nvram_lock(tp);
3017                         tg3_halt_cpu(tp, RX_CPU_BASE);
3018                         if (!err)
3019                                 tg3_nvram_unlock(tp);
3020                 }
3021         }
3022
3023         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3024
3025         return 0;
3026 }
3027
3028 static void tg3_power_down(struct tg3 *tp)
3029 {
3030         tg3_power_down_prepare(tp);
3031
3032         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3033         pci_set_power_state(tp->pdev, PCI_D3hot);
3034 }
3035
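     /* Decode the speed/duplex field of the MII_TG3_AUX_STAT register
      * into ethtool SPEED_xxx/DUPLEX_xxx values.  FET-style PHYs use
      * a different encoding and are handled in the default case.
      */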
3036 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3037 {
3038         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3039         case MII_TG3_AUX_STAT_10HALF:
3040                 *speed = SPEED_10;
3041                 *duplex = DUPLEX_HALF;
3042                 break;
3043
3044         case MII_TG3_AUX_STAT_10FULL:
3045                 *speed = SPEED_10;
3046                 *duplex = DUPLEX_FULL;
3047                 break;
3048
3049         case MII_TG3_AUX_STAT_100HALF:
3050                 *speed = SPEED_100;
3051                 *duplex = DUPLEX_HALF;
3052                 break;
3053
3054         case MII_TG3_AUX_STAT_100FULL:
3055                 *speed = SPEED_100;
3056                 *duplex = DUPLEX_FULL;
3057                 break;
3058
3059         case MII_TG3_AUX_STAT_1000HALF:
3060                 *speed = SPEED_1000;
3061                 *duplex = DUPLEX_HALF;
3062                 break;
3063
3064         case MII_TG3_AUX_STAT_1000FULL:
3065                 *speed = SPEED_1000;
3066                 *duplex = DUPLEX_FULL;
3067                 break;
3068
3069         default:
3070                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3071                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3072                                  SPEED_10;
3073                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3074                                   DUPLEX_HALF;
3075                         break;
3076                 }
3077                 *speed = SPEED_INVALID;
3078                 *duplex = DUPLEX_INVALID;
3079                 break;
3080         }
3081 }
3082
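     /* Program the PHY autoneg advertisement (MII_ADVERTISE,
      * MII_CTRL1000 and, on EEE-capable devices, the clause 45 EEE
      * advertisement register) from an ethtool-style advertise mask
      * and the requested flow control.
      */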
3083 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3084 {
3085         int err = 0;
3086         u32 val, new_adv;
3087
3088         new_adv = ADVERTISE_CSMA;
3089         if (advertise & ADVERTISED_10baseT_Half)
3090                 new_adv |= ADVERTISE_10HALF;
3091         if (advertise & ADVERTISED_10baseT_Full)
3092                 new_adv |= ADVERTISE_10FULL;
3093         if (advertise & ADVERTISED_100baseT_Half)
3094                 new_adv |= ADVERTISE_100HALF;
3095         if (advertise & ADVERTISED_100baseT_Full)
3096                 new_adv |= ADVERTISE_100FULL;
3097
3098         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3099
3100         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3101         if (err)
3102                 goto done;
3103
3104         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3105                 goto done;
3106
3107         new_adv = 0;
3108         if (advertise & ADVERTISED_1000baseT_Half)
3109                 new_adv |= ADVERTISE_1000HALF;
3110         if (advertise & ADVERTISED_1000baseT_Full)
3111                 new_adv |= ADVERTISE_1000FULL;
3112
3113         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3114             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3115                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3116
3117         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3118         if (err)
3119                 goto done;
3120
3121         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3122                 goto done;
3123
3124         tw32(TG3_CPMU_EEE_MODE,
3125              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3126
3127         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3128         if (!err) {
3129                 u32 err2;
3130
3131                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3132                 case ASIC_REV_5717:
3133                 case ASIC_REV_57765:
3134                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3135                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3136                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3137                         /* Fall through */
3138                 case ASIC_REV_5719:
3139                         val = MII_TG3_DSP_TAP26_ALNOKO |
3140                               MII_TG3_DSP_TAP26_RMRXSTO |
3141                               MII_TG3_DSP_TAP26_OPCSINPT;
3142                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3143                 }
3144
3145                 val = 0;
3146                 /* Advertise 100BASE-TX EEE ability */
3147                 if (advertise & ADVERTISED_100baseT_Full)
3148                         val |= MDIO_AN_EEE_ADV_100TX;
3149                 /* Advertise 1000BASE-T EEE ability */
3150                 if (advertise & ADVERTISED_1000baseT_Full)
3151                         val |= MDIO_AN_EEE_ADV_1000T;
3152                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3153
3154                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3155                 if (!err)
3156                         err = err2;
3157         }
3158
3159 done:
3160         return err;
3161 }
3162
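     /* Begin link bring-up on a copper PHY: advertise a restricted
      * set of modes when entering WoL low power, advertise the full
      * configured set when autonegotiating, or force BMCR to the one
      * requested speed/duplex.
      */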
3163 static void tg3_phy_copper_begin(struct tg3 *tp)
3164 {
3165         u32 new_adv;
3166         int i;
3167
3168         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3169                 new_adv = ADVERTISED_10baseT_Half |
3170                           ADVERTISED_10baseT_Full;
3171                 if (tg3_flag(tp, WOL_SPEED_100MB))
3172                         new_adv |= ADVERTISED_100baseT_Half |
3173                                    ADVERTISED_100baseT_Full;
3174
3175                 tg3_phy_autoneg_cfg(tp, new_adv,
3176                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3177         } else if (tp->link_config.speed == SPEED_INVALID) {
3178                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3179                         tp->link_config.advertising &=
3180                                 ~(ADVERTISED_1000baseT_Half |
3181                                   ADVERTISED_1000baseT_Full);
3182
3183                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3184                                     tp->link_config.flowctrl);
3185         } else {
3186                 /* Asking for a specific link mode. */
3187                 if (tp->link_config.speed == SPEED_1000) {
3188                         if (tp->link_config.duplex == DUPLEX_FULL)
3189                                 new_adv = ADVERTISED_1000baseT_Full;
3190                         else
3191                                 new_adv = ADVERTISED_1000baseT_Half;
3192                 } else if (tp->link_config.speed == SPEED_100) {
3193                         if (tp->link_config.duplex == DUPLEX_FULL)
3194                                 new_adv = ADVERTISED_100baseT_Full;
3195                         else
3196                                 new_adv = ADVERTISED_100baseT_Half;
3197                 } else {
3198                         if (tp->link_config.duplex == DUPLEX_FULL)
3199                                 new_adv = ADVERTISED_10baseT_Full;
3200                         else
3201                                 new_adv = ADVERTISED_10baseT_Half;
3202                 }
3203
3204                 tg3_phy_autoneg_cfg(tp, new_adv,
3205                                     tp->link_config.flowctrl);
3206         }
3207
3208         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3209             tp->link_config.speed != SPEED_INVALID) {
3210                 u32 bmcr, orig_bmcr;
3211
3212                 tp->link_config.active_speed = tp->link_config.speed;
3213                 tp->link_config.active_duplex = tp->link_config.duplex;
3214
3215                 bmcr = 0;
3216                 switch (tp->link_config.speed) {
3217                 default:
3218                 case SPEED_10:
3219                         break;
3220
3221                 case SPEED_100:
3222                         bmcr |= BMCR_SPEED100;
3223                         break;
3224
3225                 case SPEED_1000:
3226                         bmcr |= BMCR_SPEED1000;
3227                         break;
3228                 }
3229
3230                 if (tp->link_config.duplex == DUPLEX_FULL)
3231                         bmcr |= BMCR_FULLDPLX;
3232
3233                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3234                     (bmcr != orig_bmcr)) {
3235                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3236                         for (i = 0; i < 1500; i++) {
3237                                 u32 tmp;
3238
3239                                 udelay(10);
3240                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3241                                     tg3_readphy(tp, MII_BMSR, &tmp))
3242                                         continue;
3243                                 if (!(tmp & BMSR_LSTATUS)) {
3244                                         udelay(40);
3245                                         break;
3246                                 }
3247                         }
3248                         tg3_writephy(tp, MII_BMCR, bmcr);
3249                         udelay(40);
3250                 }
3251         } else {
3252                 tg3_writephy(tp, MII_BMCR,
3253                              BMCR_ANENABLE | BMCR_ANRESTART);
3254         }
3255 }
3256
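     /* Load what appear to be vendor-specified DSP values into a
      * BCM5401 PHY; the individual register meanings are not
      * documented here.
      */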
3257 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3258 {
3259         int err;
3260
3261         /* Turn off tap power management and set the extended
3262          * packet length bit. */
3263         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3264
3265         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3266         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3267         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3268         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3269         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3270
3271         udelay(40);
3272
3273         return err;
3274 }
3275
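     /* Return 1 if the PHY advertisement registers already cover
      * every mode in @mask, 0 otherwise (or on a PHY read failure).
      */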
3276 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3277 {
3278         u32 adv_reg, all_mask = 0;
3279
3280         if (mask & ADVERTISED_10baseT_Half)
3281                 all_mask |= ADVERTISE_10HALF;
3282         if (mask & ADVERTISED_10baseT_Full)
3283                 all_mask |= ADVERTISE_10FULL;
3284         if (mask & ADVERTISED_100baseT_Half)
3285                 all_mask |= ADVERTISE_100HALF;
3286         if (mask & ADVERTISED_100baseT_Full)
3287                 all_mask |= ADVERTISE_100FULL;
3288
3289         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3290                 return 0;
3291
3292         if ((adv_reg & all_mask) != all_mask)
3293                 return 0;
3294         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3295                 u32 tg3_ctrl;
3296
3297                 all_mask = 0;
3298                 if (mask & ADVERTISED_1000baseT_Half)
3299                         all_mask |= ADVERTISE_1000HALF;
3300                 if (mask & ADVERTISED_1000baseT_Full)
3301                         all_mask |= ADVERTISE_1000FULL;
3302
3303                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3304                         return 0;
3305
3306                 if ((tg3_ctrl & all_mask) != all_mask)
3307                         return 0;
3308         }
3309         return 1;
3310 }
3311
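     /* Verify that the advertised pause bits match the requested
      * flow control.  On a full-duplex mismatch return 0; on half
      * duplex the advertisement is silently rewritten so the next
      * renegotiation starts out correct.
      */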
3312 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3313 {
3314         u32 curadv, reqadv;
3315
3316         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3317                 return 1;
3318
3319         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3320         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3321
3322         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3323                 if (curadv != reqadv)
3324                         return 0;
3325
3326                 if (tg3_flag(tp, PAUSE_AUTONEG))
3327                         tg3_readphy(tp, MII_LPA, rmtadv);
3328         } else {
3329                 /* Reprogram the advertisement register, even if it
3330                  * does not affect the current link.  If the link
3331                  * gets renegotiated in the future, we can save an
3332                  * additional renegotiation cycle by advertising
3333                  * it correctly in the first place.
3334                  */
3335                 if (curadv != reqadv) {
3336                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3337                                      ADVERTISE_PAUSE_ASYM);
3338                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3339                 }
3340         }
3341
3342         return 1;
3343 }
3344
3345 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3346 {
3347         int current_link_up;
3348         u32 bmsr, val;
3349         u32 lcl_adv, rmt_adv;
3350         u16 current_speed;
3351         u8 current_duplex;
3352         int i, err;
3353
3354         tw32(MAC_EVENT, 0);
3355
3356         tw32_f(MAC_STATUS,
3357              (MAC_STATUS_SYNC_CHANGED |
3358               MAC_STATUS_CFG_CHANGED |
3359               MAC_STATUS_MI_COMPLETION |
3360               MAC_STATUS_LNKSTATE_CHANGED));
3361         udelay(40);
3362
3363         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3364                 tw32_f(MAC_MI_MODE,
3365                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3366                 udelay(80);
3367         }
3368
3369         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3370
3371         /* Some third-party PHYs need to be reset on link going
3372          * down.
3373          */
3374         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3375              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3376              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3377             netif_carrier_ok(tp->dev)) {
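                     /* BMSR latches link-down events; read it twice
                      * so the second read returns the current state.
                      */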
3378                 tg3_readphy(tp, MII_BMSR, &bmsr);
3379                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3380                     !(bmsr & BMSR_LSTATUS))
3381                         force_reset = 1;
3382         }
3383         if (force_reset)
3384                 tg3_phy_reset(tp);
3385
3386         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3387                 tg3_readphy(tp, MII_BMSR, &bmsr);
3388                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3389                     !tg3_flag(tp, INIT_COMPLETE))
3390                         bmsr = 0;
3391
3392                 if (!(bmsr & BMSR_LSTATUS)) {
3393                         err = tg3_init_5401phy_dsp(tp);
3394                         if (err)
3395                                 return err;
3396
3397                         tg3_readphy(tp, MII_BMSR, &bmsr);
3398                         for (i = 0; i < 1000; i++) {
3399                                 udelay(10);
3400                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3401                                     (bmsr & BMSR_LSTATUS)) {
3402                                         udelay(40);
3403                                         break;
3404                                 }
3405                         }
3406
3407                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3408                             TG3_PHY_REV_BCM5401_B0 &&
3409                             !(bmsr & BMSR_LSTATUS) &&
3410                             tp->link_config.active_speed == SPEED_1000) {
3411                                 err = tg3_phy_reset(tp);
3412                                 if (!err)
3413                                         err = tg3_init_5401phy_dsp(tp);
3414                                 if (err)
3415                                         return err;
3416                         }
3417                 }
3418         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3419                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3420                 /* 5701 {A0,B0} CRC bug workaround */
3421                 tg3_writephy(tp, 0x15, 0x0a75);
3422                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3423                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3424                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3425         }
3426
3427         /* Clear pending interrupts... */
3428         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3429         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3430
3431         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3432                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3433         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3434                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3435
3436         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3437             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3438                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3439                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3440                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3441                 else
3442                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3443         }
3444
3445         current_link_up = 0;
3446         current_speed = SPEED_INVALID;
3447         current_duplex = DUPLEX_INVALID;
3448
3449         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3450                 err = tg3_phy_auxctl_read(tp,
3451                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3452                                           &val);
3453                 if (!err && !(val & (1 << 10))) {
3454                         tg3_phy_auxctl_write(tp,
3455                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3456                                              val | (1 << 10));
3457                         goto relink;
3458                 }
3459         }
3460
3461         bmsr = 0;
3462         for (i = 0; i < 100; i++) {
3463                 tg3_readphy(tp, MII_BMSR, &bmsr);
3464                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3465                     (bmsr & BMSR_LSTATUS))
3466                         break;
3467                 udelay(40);
3468         }
3469
3470         if (bmsr & BMSR_LSTATUS) {
3471                 u32 aux_stat, bmcr;
3472
3473                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3474                 for (i = 0; i < 2000; i++) {
3475                         udelay(10);
3476                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3477                             aux_stat)
3478                                 break;
3479                 }
3480
3481                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3482                                              &current_speed,
3483                                              &current_duplex);
3484
3485                 bmcr = 0;
3486                 for (i = 0; i < 200; i++) {
3487                         tg3_readphy(tp, MII_BMCR, &bmcr);
3488                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3489                                 continue;
3490                         if (bmcr && bmcr != 0x7fff)
3491                                 break;
3492                         udelay(10);
3493                 }
3494
3495                 lcl_adv = 0;
3496                 rmt_adv = 0;
3497
3498                 tp->link_config.active_speed = current_speed;
3499                 tp->link_config.active_duplex = current_duplex;
3500
3501                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3502                         if ((bmcr & BMCR_ANENABLE) &&
3503                             tg3_copper_is_advertising_all(tp,
3504                                                 tp->link_config.advertising)) {
3505                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3506                                                                   &rmt_adv))
3507                                         current_link_up = 1;
3508                         }
3509                 } else {
3510                         if (!(bmcr & BMCR_ANENABLE) &&
3511                             tp->link_config.speed == current_speed &&
3512                             tp->link_config.duplex == current_duplex &&
3513                             tp->link_config.flowctrl ==
3514                             tp->link_config.active_flowctrl) {
3515                                 current_link_up = 1;
3516                         }
3517                 }
3518
3519                 if (current_link_up == 1 &&
3520                     tp->link_config.active_duplex == DUPLEX_FULL)
3521                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3522         }
3523
3524 relink:
3525         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3526                 tg3_phy_copper_begin(tp);
3527
3528                 tg3_readphy(tp, MII_BMSR, &bmsr);
3529                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3530                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3531                         current_link_up = 1;
3532         }
3533
3534         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3535         if (current_link_up == 1) {
3536                 if (tp->link_config.active_speed == SPEED_100 ||
3537                     tp->link_config.active_speed == SPEED_10)
3538                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3539                 else
3540                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3541         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3542                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3543         else
3544                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3545
3546         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3547         if (tp->link_config.active_duplex == DUPLEX_HALF)
3548                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3549
3550         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3551                 if (current_link_up == 1 &&
3552                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3553                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3554                 else
3555                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3556         }
3557
3558         /* Without this setting the Netgear GA302T PHY does not
3559          * send or receive packets; the reason is not known.
3560          */
3561         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3562             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3563                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3564                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3565                 udelay(80);
3566         }
3567
3568         tw32_f(MAC_MODE, tp->mac_mode);
3569         udelay(40);
3570
3571         tg3_phy_eee_adjust(tp, current_link_up);
3572
3573         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3574                 /* Link changes are polled via timer; disable MAC events. */
3575                 tw32_f(MAC_EVENT, 0);
3576         } else {
3577                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3578         }
3579         udelay(40);
3580
3581         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3582             current_link_up == 1 &&
3583             tp->link_config.active_speed == SPEED_1000 &&
3584             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3585                 udelay(120);
3586                 tw32_f(MAC_STATUS,
3587                      (MAC_STATUS_SYNC_CHANGED |
3588                       MAC_STATUS_CFG_CHANGED));
3589                 udelay(40);
3590                 tg3_write_mem(tp,
3591                               NIC_SRAM_FIRMWARE_MBOX,
3592                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3593         }
3594
3595         /* Prevent send BD corruption caused by the CLKREQ bug. */
3596         if (tg3_flag(tp, CLKREQ_BUG)) {
3597                 u16 oldlnkctl, newlnkctl;
3598
3599                 pci_read_config_word(tp->pdev,
3600                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3601                                      &oldlnkctl);
3602                 if (tp->link_config.active_speed == SPEED_100 ||
3603                     tp->link_config.active_speed == SPEED_10)
3604                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3605                 else
3606                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3607                 if (newlnkctl != oldlnkctl)
3608                         pci_write_config_word(tp->pdev,
3609                                               pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3610                                               newlnkctl);
3611         }
3612
3613         if (current_link_up != netif_carrier_ok(tp->dev)) {
3614                 if (current_link_up)
3615                         netif_carrier_on(tp->dev);
3616                 else
3617                         netif_carrier_off(tp->dev);
3618                 tg3_link_report(tp);
3619         }
3620
3621         return 0;
3622 }
3623
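     /* Software autonegotiation for fiber links, used when the
      * hardware autoneg engine is not available.  This is essentially
      * the IEEE 802.3z clause 37 arbitration state machine, stepped
      * from fiber_autoneg() below.
      */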
3624 struct tg3_fiber_aneginfo {
3625         int state;
3626 #define ANEG_STATE_UNKNOWN              0
3627 #define ANEG_STATE_AN_ENABLE            1
3628 #define ANEG_STATE_RESTART_INIT         2
3629 #define ANEG_STATE_RESTART              3
3630 #define ANEG_STATE_DISABLE_LINK_OK      4
3631 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3632 #define ANEG_STATE_ABILITY_DETECT       6
3633 #define ANEG_STATE_ACK_DETECT_INIT      7
3634 #define ANEG_STATE_ACK_DETECT           8
3635 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3636 #define ANEG_STATE_COMPLETE_ACK         10
3637 #define ANEG_STATE_IDLE_DETECT_INIT     11
3638 #define ANEG_STATE_IDLE_DETECT          12
3639 #define ANEG_STATE_LINK_OK              13
3640 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3641 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3642
3643         u32 flags;
3644 #define MR_AN_ENABLE            0x00000001
3645 #define MR_RESTART_AN           0x00000002
3646 #define MR_AN_COMPLETE          0x00000004
3647 #define MR_PAGE_RX              0x00000008
3648 #define MR_NP_LOADED            0x00000010
3649 #define MR_TOGGLE_TX            0x00000020
3650 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3651 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3652 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3653 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3654 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3655 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3656 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3657 #define MR_TOGGLE_RX            0x00002000
3658 #define MR_NP_RX                0x00004000
3659
3660 #define MR_LINK_OK              0x80000000
3661
3662         unsigned long link_time, cur_time;
3663
3664         u32 ability_match_cfg;
3665         int ability_match_count;
3666
3667         char ability_match, idle_match, ack_match;
3668
3669         u32 txconfig, rxconfig;
3670 #define ANEG_CFG_NP             0x00000080
3671 #define ANEG_CFG_ACK            0x00000040
3672 #define ANEG_CFG_RF2            0x00000020
3673 #define ANEG_CFG_RF1            0x00000010
3674 #define ANEG_CFG_PS2            0x00000001
3675 #define ANEG_CFG_PS1            0x00008000
3676 #define ANEG_CFG_HD             0x00004000
3677 #define ANEG_CFG_FD             0x00002000
3678 #define ANEG_CFG_INVAL          0x00001f06
3679
3680 };
3681 #define ANEG_OK         0
3682 #define ANEG_DONE       1
3683 #define ANEG_TIMER_ENAB 2
3684 #define ANEG_FAILED     -1
3685
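     /* Settle time in state-machine ticks; fiber_autoneg() steps the
      * machine roughly once per microsecond, so this is about 10 ms.
      */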
3686 #define ANEG_STATE_SETTLE_TIME  10000
3687
3688 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3689                                    struct tg3_fiber_aneginfo *ap)
3690 {
3691         u16 flowctrl;
3692         unsigned long delta;
3693         u32 rx_cfg_reg;
3694         int ret;
3695
3696         if (ap->state == ANEG_STATE_UNKNOWN) {
3697                 ap->rxconfig = 0;
3698                 ap->link_time = 0;
3699                 ap->cur_time = 0;
3700                 ap->ability_match_cfg = 0;
3701                 ap->ability_match_count = 0;
3702                 ap->ability_match = 0;
3703                 ap->idle_match = 0;
3704                 ap->ack_match = 0;
3705         }
3706         ap->cur_time++;
3707
3708         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3709                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3710
3711                 if (rx_cfg_reg != ap->ability_match_cfg) {
3712                         ap->ability_match_cfg = rx_cfg_reg;
3713                         ap->ability_match = 0;
3714                         ap->ability_match_count = 0;
3715                 } else {
3716                         if (++ap->ability_match_count > 1) {
3717                                 ap->ability_match = 1;
3718                                 ap->ability_match_cfg = rx_cfg_reg;
3719                         }
3720                 }
3721                 if (rx_cfg_reg & ANEG_CFG_ACK)
3722                         ap->ack_match = 1;
3723                 else
3724                         ap->ack_match = 0;
3725
3726                 ap->idle_match = 0;
3727         } else {
3728                 ap->idle_match = 1;
3729                 ap->ability_match_cfg = 0;
3730                 ap->ability_match_count = 0;
3731                 ap->ability_match = 0;
3732                 ap->ack_match = 0;
3733
3734                 rx_cfg_reg = 0;
3735         }
3736
3737         ap->rxconfig = rx_cfg_reg;
3738         ret = ANEG_OK;
3739
3740         switch (ap->state) {
3741         case ANEG_STATE_UNKNOWN:
3742                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3743                         ap->state = ANEG_STATE_AN_ENABLE;
3744
3745                 /* fallthru */
3746         case ANEG_STATE_AN_ENABLE:
3747                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3748                 if (ap->flags & MR_AN_ENABLE) {
3749                         ap->link_time = 0;
3750                         ap->cur_time = 0;
3751                         ap->ability_match_cfg = 0;
3752                         ap->ability_match_count = 0;
3753                         ap->ability_match = 0;
3754                         ap->idle_match = 0;
3755                         ap->ack_match = 0;
3756
3757                         ap->state = ANEG_STATE_RESTART_INIT;
3758                 } else {
3759                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3760                 }
3761                 break;
3762
3763         case ANEG_STATE_RESTART_INIT:
3764                 ap->link_time = ap->cur_time;
3765                 ap->flags &= ~(MR_NP_LOADED);
3766                 ap->txconfig = 0;
3767                 tw32(MAC_TX_AUTO_NEG, 0);
3768                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3769                 tw32_f(MAC_MODE, tp->mac_mode);
3770                 udelay(40);
3771
3772                 ret = ANEG_TIMER_ENAB;
3773                 ap->state = ANEG_STATE_RESTART;
3774
3775                 /* fallthru */
3776         case ANEG_STATE_RESTART:
3777                 delta = ap->cur_time - ap->link_time;
3778                 if (delta > ANEG_STATE_SETTLE_TIME)
3779                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3780                 else
3781                         ret = ANEG_TIMER_ENAB;
3782                 break;
3783
3784         case ANEG_STATE_DISABLE_LINK_OK:
3785                 ret = ANEG_DONE;
3786                 break;
3787
3788         case ANEG_STATE_ABILITY_DETECT_INIT:
3789                 ap->flags &= ~(MR_TOGGLE_TX);
3790                 ap->txconfig = ANEG_CFG_FD;
3791                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3792                 if (flowctrl & ADVERTISE_1000XPAUSE)
3793                         ap->txconfig |= ANEG_CFG_PS1;
3794                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3795                         ap->txconfig |= ANEG_CFG_PS2;
3796                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3797                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3798                 tw32_f(MAC_MODE, tp->mac_mode);
3799                 udelay(40);
3800
3801                 ap->state = ANEG_STATE_ABILITY_DETECT;
3802                 break;
3803
3804         case ANEG_STATE_ABILITY_DETECT:
3805                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3806                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3807                 break;
3808
3809         case ANEG_STATE_ACK_DETECT_INIT:
3810                 ap->txconfig |= ANEG_CFG_ACK;
3811                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3812                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3813                 tw32_f(MAC_MODE, tp->mac_mode);
3814                 udelay(40);
3815
3816                 ap->state = ANEG_STATE_ACK_DETECT;
3817
3818                 /* fallthru */
3819         case ANEG_STATE_ACK_DETECT:
3820                 if (ap->ack_match != 0) {
3821                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3822                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3823                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3824                         } else {
3825                                 ap->state = ANEG_STATE_AN_ENABLE;
3826                         }
3827                 } else if (ap->ability_match != 0 &&
3828                            ap->rxconfig == 0) {
3829                         ap->state = ANEG_STATE_AN_ENABLE;
3830                 }
3831                 break;
3832
3833         case ANEG_STATE_COMPLETE_ACK_INIT:
3834                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3835                         ret = ANEG_FAILED;
3836                         break;
3837                 }
3838                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3839                                MR_LP_ADV_HALF_DUPLEX |
3840                                MR_LP_ADV_SYM_PAUSE |
3841                                MR_LP_ADV_ASYM_PAUSE |
3842                                MR_LP_ADV_REMOTE_FAULT1 |
3843                                MR_LP_ADV_REMOTE_FAULT2 |
3844                                MR_LP_ADV_NEXT_PAGE |
3845                                MR_TOGGLE_RX |
3846                                MR_NP_RX);
3847                 if (ap->rxconfig & ANEG_CFG_FD)
3848                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3849                 if (ap->rxconfig & ANEG_CFG_HD)
3850                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3851                 if (ap->rxconfig & ANEG_CFG_PS1)
3852                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3853                 if (ap->rxconfig & ANEG_CFG_PS2)
3854                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3855                 if (ap->rxconfig & ANEG_CFG_RF1)
3856                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3857                 if (ap->rxconfig & ANEG_CFG_RF2)
3858                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3859                 if (ap->rxconfig & ANEG_CFG_NP)
3860                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3861
3862                 ap->link_time = ap->cur_time;
3863
3864                 ap->flags ^= (MR_TOGGLE_TX);
3865                 if (ap->rxconfig & 0x0008)
3866                         ap->flags |= MR_TOGGLE_RX;
3867                 if (ap->rxconfig & ANEG_CFG_NP)
3868                         ap->flags |= MR_NP_RX;
3869                 ap->flags |= MR_PAGE_RX;
3870
3871                 ap->state = ANEG_STATE_COMPLETE_ACK;
3872                 ret = ANEG_TIMER_ENAB;
3873                 break;
3874
3875         case ANEG_STATE_COMPLETE_ACK:
3876                 if (ap->ability_match != 0 &&
3877                     ap->rxconfig == 0) {
3878                         ap->state = ANEG_STATE_AN_ENABLE;
3879                         break;
3880                 }
3881                 delta = ap->cur_time - ap->link_time;
3882                 if (delta > ANEG_STATE_SETTLE_TIME) {
3883                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3884                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3885                         } else {
3886                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3887                                     !(ap->flags & MR_NP_RX)) {
3888                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3889                                 } else {
3890                                         ret = ANEG_FAILED;
3891                                 }
3892                         }
3893                 }
3894                 break;
3895
3896         case ANEG_STATE_IDLE_DETECT_INIT:
3897                 ap->link_time = ap->cur_time;
3898                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3899                 tw32_f(MAC_MODE, tp->mac_mode);
3900                 udelay(40);
3901
3902                 ap->state = ANEG_STATE_IDLE_DETECT;
3903                 ret = ANEG_TIMER_ENAB;
3904                 break;
3905
3906         case ANEG_STATE_IDLE_DETECT:
3907                 if (ap->ability_match != 0 &&
3908                     ap->rxconfig == 0) {
3909                         ap->state = ANEG_STATE_AN_ENABLE;
3910                         break;
3911                 }
3912                 delta = ap->cur_time - ap->link_time;
3913                 if (delta > ANEG_STATE_SETTLE_TIME) {
3914                         /* XXX another gem from the Broadcom driver :( */
3915                         ap->state = ANEG_STATE_LINK_OK;
3916                 }
3917                 break;
3918
3919         case ANEG_STATE_LINK_OK:
3920                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3921                 ret = ANEG_DONE;
3922                 break;
3923
3924         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3925                 /* ??? unimplemented */
3926                 break;
3927
3928         case ANEG_STATE_NEXT_PAGE_WAIT:
3929                 /* ??? unimplemented */
3930                 break;
3931
3932         default:
3933                 ret = ANEG_FAILED;
3934                 break;
3935         }
3936
3937         return ret;
3938 }
3939
3940 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3941 {
3942         int res = 0;
3943         struct tg3_fiber_aneginfo aninfo;
3944         int status = ANEG_FAILED;
3945         unsigned int tick;
3946         u32 tmp;
3947
3948         tw32_f(MAC_TX_AUTO_NEG, 0);
3949
3950         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3951         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3952         udelay(40);
3953
3954         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3955         udelay(40);
3956
3957         memset(&aninfo, 0, sizeof(aninfo));
3958         aninfo.flags |= MR_AN_ENABLE;
3959         aninfo.state = ANEG_STATE_UNKNOWN;
3960         aninfo.cur_time = 0;
3961         tick = 0;
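             /* Step the state machine about once per microsecond,
              * giving up after roughly 195 ms if autoneg never
              * completes.
              */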
3962         while (++tick < 195000) {
3963                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3964                 if (status == ANEG_DONE || status == ANEG_FAILED)
3965                         break;
3966
3967                 udelay(1);
3968         }
3969
3970         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3971         tw32_f(MAC_MODE, tp->mac_mode);
3972         udelay(40);
3973
3974         *txflags = aninfo.txconfig;
3975         *rxflags = aninfo.flags;
3976
3977         if (status == ANEG_DONE &&
3978             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3979                              MR_LP_ADV_FULL_DUPLEX)))
3980                 res = 1;
3981
3982         return res;
3983 }
3984
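     /* Bring a BCM8002 fiber PHY out of reset with what appears to
      * be a vendor-supplied register sequence; most of the values
      * are otherwise undocumented.
      */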
3985 static void tg3_init_bcm8002(struct tg3 *tp)
3986 {
3987         u32 mac_status = tr32(MAC_STATUS);
3988         int i;
3989
3990         /* Reset when initializing the first time or when we have a link. */
3991         if (tg3_flag(tp, INIT_COMPLETE) &&
3992             !(mac_status & MAC_STATUS_PCS_SYNCED))
3993                 return;
3994
3995         /* Set PLL lock range. */
3996         tg3_writephy(tp, 0x16, 0x8007);
3997
3998         /* SW reset */
3999         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4000
4001         /* Wait for reset to complete. */
4002         /* XXX schedule_timeout() ... */
4003         for (i = 0; i < 500; i++)
4004                 udelay(10);
4005
4006         /* Config mode; select PMA/Ch 1 regs. */
4007         tg3_writephy(tp, 0x10, 0x8411);
4008
4009         /* Enable auto-lock and comdet, select txclk for tx. */
4010         tg3_writephy(tp, 0x11, 0x0a10);
4011
4012         tg3_writephy(tp, 0x18, 0x00a0);
4013         tg3_writephy(tp, 0x16, 0x41ff);
4014
4015         /* Assert and deassert POR. */
4016         tg3_writephy(tp, 0x13, 0x0400);
4017         udelay(40);
4018         tg3_writephy(tp, 0x13, 0x0000);
4019
4020         tg3_writephy(tp, 0x11, 0x0a50);
4021         udelay(40);
4022         tg3_writephy(tp, 0x11, 0x0a10);
4023
4024         /* Wait for signal to stabilize */
4025         /* XXX schedule_timeout() ... */
4026         for (i = 0; i < 15000; i++)
4027                 udelay(10);
4028
4029         /* Deselect the channel register so we can read the PHYID
4030          * later.
4031          */
4032         tg3_writephy(tp, 0x10, 0x8011);
4033 }
4034
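     /* Fiber link setup using the SG_DIG hardware autoneg engine.
      * Returns 1 if the link came up, 0 otherwise.
      */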
4035 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4036 {
4037         u16 flowctrl;
4038         u32 sg_dig_ctrl, sg_dig_status;
4039         u32 serdes_cfg, expected_sg_dig_ctrl;
4040         int workaround, port_a;
4041         int current_link_up;
4042
4043         serdes_cfg = 0;
4044         expected_sg_dig_ctrl = 0;
4045         workaround = 0;
4046         port_a = 1;
4047         current_link_up = 0;
4048
4049         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4050             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4051                 workaround = 1;
4052                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4053                         port_a = 0;
4054
4055                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4056                 /* preserve bits 20-23 for voltage regulator */
4057                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4058         }
4059
4060         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4061
4062         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4063                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4064                         if (workaround) {
4065                                 u32 val = serdes_cfg;
4066
4067                                 if (port_a)
4068                                         val |= 0xc010000;
4069                                 else
4070                                         val |= 0x4010000;
4071                                 tw32_f(MAC_SERDES_CFG, val);
4072                         }
4073
4074                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4075                 }
4076                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4077                         tg3_setup_flow_control(tp, 0, 0);
4078                         current_link_up = 1;
4079                 }
4080                 goto out;
4081         }
4082
4083         /* Want auto-negotiation.  */
4084         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4085
4086         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4087         if (flowctrl & ADVERTISE_1000XPAUSE)
4088                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4089         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4090                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4091
4092         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4093                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4094                     tp->serdes_counter &&
4095                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4096                                     MAC_STATUS_RCVD_CFG)) ==
4097                      MAC_STATUS_PCS_SYNCED)) {
4098                         tp->serdes_counter--;
4099                         current_link_up = 1;
4100                         goto out;
4101                 }
4102 restart_autoneg:
4103                 if (workaround)
4104                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4105                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4106                 udelay(5);
4107                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4108
4109                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4110                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4111         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4112                                  MAC_STATUS_SIGNAL_DET)) {
4113                 sg_dig_status = tr32(SG_DIG_STATUS);
4114                 mac_status = tr32(MAC_STATUS);
4115
4116                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4117                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4118                         u32 local_adv = 0, remote_adv = 0;
4119
4120                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4121                                 local_adv |= ADVERTISE_1000XPAUSE;
4122                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4123                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4124
4125                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4126                                 remote_adv |= LPA_1000XPAUSE;
4127                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4128                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4129
4130                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4131                         current_link_up = 1;
4132                         tp->serdes_counter = 0;
4133                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4134                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4135                         if (tp->serdes_counter)
4136                                 tp->serdes_counter--;
4137                         else {
4138                                 if (workaround) {
4139                                         u32 val = serdes_cfg;
4140
4141                                         if (port_a)
4142                                                 val |= 0xc010000;
4143                                         else
4144                                                 val |= 0x4010000;
4145
4146                                         tw32_f(MAC_SERDES_CFG, val);
4147                                 }
4148
4149                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4150                                 udelay(40);
4151
4152                                 /* Link parallel detection - link is up only
4153                                  * if we have PCS_SYNC and are not receiving
4154                                  * config code words. */
4155                                 mac_status = tr32(MAC_STATUS);
4156                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4157                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4158                                         tg3_setup_flow_control(tp, 0, 0);
4159                                         current_link_up = 1;
4160                                         tp->phy_flags |=
4161                                                 TG3_PHYFLG_PARALLEL_DETECT;
4162                                         tp->serdes_counter =
4163                                                 SERDES_PARALLEL_DET_TIMEOUT;
4164                                 } else
4165                                         goto restart_autoneg;
4166                         }
4167                 }
4168         } else {
4169                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4170                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4171         }
4172
4173 out:
4174         return current_link_up;
4175 }
4176
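     /* Fiber link setup without the hardware autoneg engine: run the
      * software clause 37 state machine, or simply force a 1000FD
      * link when autoneg is disabled.  Returns 1 if the link is up.
      */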
4177 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4178 {
4179         int current_link_up = 0;
4180
4181         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4182                 goto out;
4183
4184         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4185                 u32 txflags, rxflags;
4186                 int i;
4187
4188                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4189                         u32 local_adv = 0, remote_adv = 0;
4190
4191                         if (txflags & ANEG_CFG_PS1)
4192                                 local_adv |= ADVERTISE_1000XPAUSE;
4193                         if (txflags & ANEG_CFG_PS2)
4194                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4195
4196                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4197                                 remote_adv |= LPA_1000XPAUSE;
4198                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4199                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4200
4201                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4202
4203                         current_link_up = 1;
4204                 }
4205                 for (i = 0; i < 30; i++) {
4206                         udelay(20);
4207                         tw32_f(MAC_STATUS,
4208                                (MAC_STATUS_SYNC_CHANGED |
4209                                 MAC_STATUS_CFG_CHANGED));
4210                         udelay(40);
4211                         if ((tr32(MAC_STATUS) &
4212                              (MAC_STATUS_SYNC_CHANGED |
4213                               MAC_STATUS_CFG_CHANGED)) == 0)
4214                                 break;
4215                 }
4216
4217                 mac_status = tr32(MAC_STATUS);
4218                 if (current_link_up == 0 &&
4219                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4220                     !(mac_status & MAC_STATUS_RCVD_CFG))
4221                         current_link_up = 1;
4222         } else {
4223                 tg3_setup_flow_control(tp, 0, 0);
4224
4225                 /* Forcing 1000FD link up. */
4226                 current_link_up = 1;
4227
4228                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4229                 udelay(40);
4230
4231                 tw32_f(MAC_MODE, tp->mac_mode);
4232                 udelay(40);
4233         }
4234
4235 out:
4236         return current_link_up;
4237 }
4238
4239 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4240 {
4241         u32 orig_pause_cfg;
4242         u16 orig_active_speed;
4243         u8 orig_active_duplex;
4244         u32 mac_status;
4245         int current_link_up;
4246         int i;
4247
4248         orig_pause_cfg = tp->link_config.active_flowctrl;
4249         orig_active_speed = tp->link_config.active_speed;
4250         orig_active_duplex = tp->link_config.active_duplex;
4251
4252         if (!tg3_flag(tp, HW_AUTONEG) &&
4253             netif_carrier_ok(tp->dev) &&
4254             tg3_flag(tp, INIT_COMPLETE)) {
4255                 mac_status = tr32(MAC_STATUS);
4256                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4257                                MAC_STATUS_SIGNAL_DET |
4258                                MAC_STATUS_CFG_CHANGED |
4259                                MAC_STATUS_RCVD_CFG);
4260                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4261                                    MAC_STATUS_SIGNAL_DET)) {
4262                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4263                                             MAC_STATUS_CFG_CHANGED));
4264                         return 0;
4265                 }
4266         }
4267
4268         tw32_f(MAC_TX_AUTO_NEG, 0);
4269
4270         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4271         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4272         tw32_f(MAC_MODE, tp->mac_mode);
4273         udelay(40);
4274
4275         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4276                 tg3_init_bcm8002(tp);
4277
4278         /* Enable link change events even when polling the serdes.  */
4279         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4280         udelay(40);
4281
4282         current_link_up = 0;
4283         mac_status = tr32(MAC_STATUS);
4284
4285         if (tg3_flag(tp, HW_AUTONEG))
4286                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4287         else
4288                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4289
4290         tp->napi[0].hw_status->status =
4291                 (SD_STATUS_UPDATED |
4292                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4293
4294         for (i = 0; i < 100; i++) {
4295                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4296                                     MAC_STATUS_CFG_CHANGED));
4297                 udelay(5);
4298                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4299                                          MAC_STATUS_CFG_CHANGED |
4300                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4301                         break;
4302         }
4303
4304         mac_status = tr32(MAC_STATUS);
4305         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4306                 current_link_up = 0;
4307                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4308                     tp->serdes_counter == 0) {
4309                         tw32_f(MAC_MODE, (tp->mac_mode |
4310                                           MAC_MODE_SEND_CONFIGS));
4311                         udelay(1);
4312                         tw32_f(MAC_MODE, tp->mac_mode);
4313                 }
4314         }
4315
4316         if (current_link_up == 1) {
4317                 tp->link_config.active_speed = SPEED_1000;
4318                 tp->link_config.active_duplex = DUPLEX_FULL;
4319                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4320                                     LED_CTRL_LNKLED_OVERRIDE |
4321                                     LED_CTRL_1000MBPS_ON));
4322         } else {
4323                 tp->link_config.active_speed = SPEED_INVALID;
4324                 tp->link_config.active_duplex = DUPLEX_INVALID;
4325                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4326                                     LED_CTRL_LNKLED_OVERRIDE |
4327                                     LED_CTRL_TRAFFIC_OVERRIDE));
4328         }
4329
4330         if (current_link_up != netif_carrier_ok(tp->dev)) {
4331                 if (current_link_up)
4332                         netif_carrier_on(tp->dev);
4333                 else
4334                         netif_carrier_off(tp->dev);
4335                 tg3_link_report(tp);
4336         } else {
4337                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4338                 if (orig_pause_cfg != now_pause_cfg ||
4339                     orig_active_speed != tp->link_config.active_speed ||
4340                     orig_active_duplex != tp->link_config.active_duplex)
4341                         tg3_link_report(tp);
4342         }
4343
4344         return 0;
4345 }
4346
4347 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4348 {
4349         int current_link_up, err = 0;
4350         u32 bmsr, bmcr;
4351         u16 current_speed;
4352         u8 current_duplex;
4353         u32 local_adv, remote_adv;
4354
4355         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4356         tw32_f(MAC_MODE, tp->mac_mode);
4357         udelay(40);
4358
4359         tw32(MAC_EVENT, 0);
4360
4361         tw32_f(MAC_STATUS,
4362              (MAC_STATUS_SYNC_CHANGED |
4363               MAC_STATUS_CFG_CHANGED |
4364               MAC_STATUS_MI_COMPLETION |
4365               MAC_STATUS_LNKSTATE_CHANGED));
4366         udelay(40);
4367
4368         if (force_reset)
4369                 tg3_phy_reset(tp);
4370
4371         current_link_up = 0;
4372         current_speed = SPEED_INVALID;
4373         current_duplex = DUPLEX_INVALID;
4374
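        /* BMSR link status is latched-low, so read the register twice:
         * the first read returns (and clears) the latched value, the
         * second read reflects the current link state.
         */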
4375         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4376         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4377         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4378                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4379                         bmsr |= BMSR_LSTATUS;
4380                 else
4381                         bmsr &= ~BMSR_LSTATUS;
4382         }
4383
4384         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4385
4386         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4387             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4388                 /* do nothing, just check for link up at the end */
4389         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4390                 u32 adv, new_adv;
4391
4392                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4393                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4394                                   ADVERTISE_1000XPAUSE |
4395                                   ADVERTISE_1000XPSE_ASYM |
4396                                   ADVERTISE_SLCT);
4397
4398                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4399
4400                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4401                         new_adv |= ADVERTISE_1000XHALF;
4402                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4403                         new_adv |= ADVERTISE_1000XFULL;
4404
4405                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4406                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4407                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4408                         tg3_writephy(tp, MII_BMCR, bmcr);
4409
4410                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4411                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4412                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4413
4414                         return err;
4415                 }
4416         } else {
4417                 u32 new_bmcr;
4418
4419                 bmcr &= ~BMCR_SPEED1000;
4420                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4421
4422                 if (tp->link_config.duplex == DUPLEX_FULL)
4423                         new_bmcr |= BMCR_FULLDPLX;
4424
4425                 if (new_bmcr != bmcr) {
4426                         /* BMCR_SPEED1000 is a reserved bit that needs
4427                          * to be set on write.
4428                          */
4429                         new_bmcr |= BMCR_SPEED1000;
4430
4431                         /* Force a linkdown */
4432                         if (netif_carrier_ok(tp->dev)) {
4433                                 u32 adv;
4434
4435                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4436                                 adv &= ~(ADVERTISE_1000XFULL |
4437                                          ADVERTISE_1000XHALF |
4438                                          ADVERTISE_SLCT);
4439                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4440                                 tg3_writephy(tp, MII_BMCR, bmcr |
4441                                                            BMCR_ANRESTART |
4442                                                            BMCR_ANENABLE);
4443                                 udelay(10);
4444                                 netif_carrier_off(tp->dev);
4445                         }
4446                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4447                         bmcr = new_bmcr;
4448                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4449                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4450                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4451                             ASIC_REV_5714) {
4452                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4453                                         bmsr |= BMSR_LSTATUS;
4454                                 else
4455                                         bmsr &= ~BMSR_LSTATUS;
4456                         }
4457                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4458                 }
4459         }
4460
4461         if (bmsr & BMSR_LSTATUS) {
4462                 current_speed = SPEED_1000;
4463                 current_link_up = 1;
4464                 if (bmcr & BMCR_FULLDPLX)
4465                         current_duplex = DUPLEX_FULL;
4466                 else
4467                         current_duplex = DUPLEX_HALF;
4468
4469                 local_adv = 0;
4470                 remote_adv = 0;
4471
4472                 if (bmcr & BMCR_ANENABLE) {
4473                         u32 common;
4474
4475                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4476                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4477                         common = local_adv & remote_adv;
4478                         if (common & (ADVERTISE_1000XHALF |
4479                                       ADVERTISE_1000XFULL)) {
4480                                 if (common & ADVERTISE_1000XFULL)
4481                                         current_duplex = DUPLEX_FULL;
4482                                 else
4483                                         current_duplex = DUPLEX_HALF;
4484                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4485                                 /* Link is up via parallel detect */
4486                         } else {
4487                                 current_link_up = 0;
4488                         }
4489                 }
4490         }
4491
4492         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4493                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4494
4495         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4496         if (tp->link_config.active_duplex == DUPLEX_HALF)
4497                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4498
4499         tw32_f(MAC_MODE, tp->mac_mode);
4500         udelay(40);
4501
4502         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4503
4504         tp->link_config.active_speed = current_speed;
4505         tp->link_config.active_duplex = current_duplex;
4506
4507         if (current_link_up != netif_carrier_ok(tp->dev)) {
4508                 if (current_link_up)
4509                         netif_carrier_on(tp->dev);
4510                 else {
4511                         netif_carrier_off(tp->dev);
4512                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4513                 }
4514                 tg3_link_report(tp);
4515         }
4516         return err;
4517 }
4518
4519 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4520 {
4521         if (tp->serdes_counter) {
4522                 /* Give autoneg time to complete. */
4523                 tp->serdes_counter--;
4524                 return;
4525         }
4526
4527         if (!netif_carrier_ok(tp->dev) &&
4528             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4529                 u32 bmcr;
4530
4531                 tg3_readphy(tp, MII_BMCR, &bmcr);
4532                 if (bmcr & BMCR_ANENABLE) {
4533                         u32 phy1, phy2;
4534
4535                         /* Select shadow register 0x1f */
4536                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4537                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4538
4539                         /* Select expansion interrupt status register */
4540                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4541                                          MII_TG3_DSP_EXP1_INT_STAT);
4542                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4543                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4544
4545                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4546                                 /* We have signal detect but are not
4547                                  * receiving config code words, so the
4548                                  * link is up via parallel detection.
4549                                  */
4550
4551                                 bmcr &= ~BMCR_ANENABLE;
4552                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4553                                 tg3_writephy(tp, MII_BMCR, bmcr);
4554                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4555                         }
4556                 }
4557         } else if (netif_carrier_ok(tp->dev) &&
4558                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4559                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4560                 u32 phy2;
4561
4562                 /* Select expansion interrupt status register */
4563                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4564                                  MII_TG3_DSP_EXP1_INT_STAT);
4565                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4566                 if (phy2 & 0x20) {
4567                         u32 bmcr;
4568
4569                         /* Config code words received, turn on autoneg. */
4570                         tg3_readphy(tp, MII_BMCR, &bmcr);
4571                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4572
4573                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4574
4575                 }
4576         }
4577 }
4578
4579 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4580 {
4581         u32 val;
4582         int err;
4583
4584         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4585                 err = tg3_setup_fiber_phy(tp, force_reset);
4586         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4587                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4588         else
4589                 err = tg3_setup_copper_phy(tp, force_reset);
4590
4591         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4592                 u32 scale;
4593
4594                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4595                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4596                         scale = 65;
4597                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4598                         scale = 6;
4599                 else
4600                         scale = 12;
4601
4602                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4603                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4604                 tw32(GRC_MISC_CFG, val);
4605         }
4606
4607         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4608               (6 << TX_LENGTHS_IPG_SHIFT);
4609         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4610                 val |= tr32(MAC_TX_LENGTHS) &
4611                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4612                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4613
4614         if (tp->link_config.active_speed == SPEED_1000 &&
4615             tp->link_config.active_duplex == DUPLEX_HALF)
4616                 tw32(MAC_TX_LENGTHS, val |
4617                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4618         else
4619                 tw32(MAC_TX_LENGTHS, val |
4620                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4621
4622         if (!tg3_flag(tp, 5705_PLUS)) {
4623                 if (netif_carrier_ok(tp->dev)) {
4624                         tw32(HOSTCC_STAT_COAL_TICKS,
4625                              tp->coal.stats_block_coalesce_usecs);
4626                 } else {
4627                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4628                 }
4629         }
4630
4631         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4632                 val = tr32(PCIE_PWR_MGMT_THRESH);
4633                 if (!netif_carrier_ok(tp->dev))
4634                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4635                               tp->pwrmgmt_thresh;
4636                 else
4637                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4638                 tw32(PCIE_PWR_MGMT_THRESH, val);
4639         }
4640
4641         return err;
4642 }
4643
4644 static inline int tg3_irq_sync(struct tg3 *tp)
4645 {
4646         return tp->irq_sync;
4647 }
4648
4649 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4650 {
4651         int i;
4652
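        /* Note that dst is advanced by 'off' bytes as well, so each
         * register value lands at the same offset within the dump
         * buffer that the register has in the device's register space.
         */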
4653         dst = (u32 *)((u8 *)dst + off);
4654         for (i = 0; i < len; i += sizeof(u32))
4655                 *dst++ = tr32(off + i);
4656 }
4657
4658 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4659 {
4660         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4661         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4662         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4663         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4664         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4665         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4666         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4667         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4668         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4669         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4670         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4671         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4672         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4673         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4674         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4675         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4676         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4677         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4678         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4679
4680         if (tg3_flag(tp, SUPPORT_MSIX))
4681                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4682
4683         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4684         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4685         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4686         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4687         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4688         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4689         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4690         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4691
4692         if (!tg3_flag(tp, 5705_PLUS)) {
4693                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4694                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4695                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4696         }
4697
4698         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4699         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4700         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4701         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4702         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4703
4704         if (tg3_flag(tp, NVRAM))
4705                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4706 }
4707
4708 static void tg3_dump_state(struct tg3 *tp)
4709 {
4710         int i;
4711         u32 *regs;
4712
4713         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4714         if (!regs) {
4715                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4716                 return;
4717         }
4718
4719         if (tg3_flag(tp, PCI_EXPRESS)) {
4720                 /* Read up to but not including private PCI registers */
4721                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4722                         regs[i / sizeof(u32)] = tr32(i);
4723         } else
4724                 tg3_dump_legacy_regs(tp, regs);
4725
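
        /* Dump four words per line, skipping lines that are entirely
         * zero to keep the log output manageable.
         */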
4726         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4727                 if (!regs[i + 0] && !regs[i + 1] &&
4728                     !regs[i + 2] && !regs[i + 3])
4729                         continue;
4730
4731                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4732                            i * 4,
4733                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4734         }
4735
4736         kfree(regs);
4737
4738         for (i = 0; i < tp->irq_cnt; i++) {
4739                 struct tg3_napi *tnapi = &tp->napi[i];
4740
4741                 /* SW status block */
4742                 netdev_err(tp->dev,
4743                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4744                            i,
4745                            tnapi->hw_status->status,
4746                            tnapi->hw_status->status_tag,
4747                            tnapi->hw_status->rx_jumbo_consumer,
4748                            tnapi->hw_status->rx_consumer,
4749                            tnapi->hw_status->rx_mini_consumer,
4750                            tnapi->hw_status->idx[0].rx_producer,
4751                            tnapi->hw_status->idx[0].tx_consumer);
4752
4753                 netdev_err(tp->dev,
4754                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4755                            i,
4756                            tnapi->last_tag, tnapi->last_irq_tag,
4757                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4758                            tnapi->rx_rcb_ptr,
4759                            tnapi->prodring.rx_std_prod_idx,
4760                            tnapi->prodring.rx_std_cons_idx,
4761                            tnapi->prodring.rx_jmb_prod_idx,
4762                            tnapi->prodring.rx_jmb_cons_idx);
4763         }
4764 }
4765
4766 /* This is called whenever we suspect that the system chipset is re-
4767  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4768  * is bogus tx completions. We try to recover by setting the
4769  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4770  * in the workqueue.
4771  */
4772 static void tg3_tx_recover(struct tg3 *tp)
4773 {
4774         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4775                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4776
4777         netdev_warn(tp->dev,
4778                     "The system may be re-ordering memory-mapped I/O "
4779                     "cycles to the network device, attempting to recover. "
4780                     "Please report the problem to the driver maintainer "
4781                     "and include system chipset information.\n");
4782
4783         spin_lock(&tp->lock);
4784         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4785         spin_unlock(&tp->lock);
4786 }
4787
4788 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4789 {
4790         /* Tell compiler to fetch tx indices from memory. */
4791         barrier();
4792         return tnapi->tx_pending -
4793                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4794 }
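
/* Worked example (illustrative numbers): with a 512-entry TX ring
 * (mask 511), tx_prod = 10 and tx_cons = 500, the masked difference
 * (10 - 500) & 511 = 22 gives the descriptors still in flight, so
 * tx_pending - 22 slots remain free.  The masking is what keeps the
 * arithmetic correct across index wrap-around.
 */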
4795
4796 /* Tigon3 never reports partial packet sends.  So we do not
4797  * need special logic to handle SKBs that have not had all
4798  * of their frags sent yet, like SunGEM does.
4799  */
4800 static void tg3_tx(struct tg3_napi *tnapi)
4801 {
4802         struct tg3 *tp = tnapi->tp;
4803         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4804         u32 sw_idx = tnapi->tx_cons;
4805         struct netdev_queue *txq;
4806         int index = tnapi - tp->napi;
4807
4808         if (tg3_flag(tp, ENABLE_TSS))
4809                 index--;
4810
4811         txq = netdev_get_tx_queue(tp->dev, index);
4812
4813         while (sw_idx != hw_idx) {
4814                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4815                 struct sk_buff *skb = ri->skb;
4816                 int i, tx_bug = 0;
4817
4818                 if (unlikely(skb == NULL)) {
4819                         tg3_tx_recover(tp);
4820                         return;
4821                 }
4822
4823                 pci_unmap_single(tp->pdev,
4824                                  dma_unmap_addr(ri, mapping),
4825                                  skb_headlen(skb),
4826                                  PCI_DMA_TODEVICE);
4827
4828                 ri->skb = NULL;
4829
4830                 sw_idx = NEXT_TX(sw_idx);
4831
4832                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4833                         ri = &tnapi->tx_buffers[sw_idx];
4834                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4835                                 tx_bug = 1;
4836
4837                         pci_unmap_page(tp->pdev,
4838                                        dma_unmap_addr(ri, mapping),
4839                                        skb_shinfo(skb)->frags[i].size,
4840                                        PCI_DMA_TODEVICE);
4841                         sw_idx = NEXT_TX(sw_idx);
4842                 }
4843
4844                 dev_kfree_skb(skb);
4845
4846                 if (unlikely(tx_bug)) {
4847                         tg3_tx_recover(tp);
4848                         return;
4849                 }
4850         }
4851
4852         tnapi->tx_cons = sw_idx;
4853
4854         /* Need to make the tx_cons update visible to tg3_start_xmit()
4855          * before checking for netif_queue_stopped().  Without the
4856          * memory barrier, there is a small possibility that tg3_start_xmit()
4857          * will miss it and cause the queue to be stopped forever.
4858          */
4859         smp_mb();
4860
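        /* The queue state is re-checked under the tx queue lock below:
         * another CPU may have woken the queue between the lockless
         * test and lock acquisition, so the queue is only woken if it
         * is still stopped and enough descriptors are actually free.
         */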
4861         if (unlikely(netif_tx_queue_stopped(txq) &&
4862                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4863                 __netif_tx_lock(txq, smp_processor_id());
4864                 if (netif_tx_queue_stopped(txq) &&
4865                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4866                         netif_tx_wake_queue(txq);
4867                 __netif_tx_unlock(txq);
4868         }
4869 }
4870
4871 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4872 {
4873         if (!ri->skb)
4874                 return;
4875
4876         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4877                          map_sz, PCI_DMA_FROMDEVICE);
4878         dev_kfree_skb_any(ri->skb);
4879         ri->skb = NULL;
4880 }
4881
4882 /* Returns the size of the skb allocated, or < 0 on error.
4883  *
4884  * We only need to fill in the address because the other members
4885  * of the RX descriptor are invariant; see tg3_init_rings.
4886  *
4887  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4888  * posting buffers we only dirty the first cache line of the RX
4889  * descriptor (containing the address), whereas for the RX status
4890  * buffers the cpu only reads the last cache line of the RX descriptor
4891  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4892  */
4893 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4894                             u32 opaque_key, u32 dest_idx_unmasked)
4895 {
4896         struct tg3_rx_buffer_desc *desc;
4897         struct ring_info *map;
4898         struct sk_buff *skb;
4899         dma_addr_t mapping;
4900         int skb_size, dest_idx;
4901
4902         switch (opaque_key) {
4903         case RXD_OPAQUE_RING_STD:
4904                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4905                 desc = &tpr->rx_std[dest_idx];
4906                 map = &tpr->rx_std_buffers[dest_idx];
4907                 skb_size = tp->rx_pkt_map_sz;
4908                 break;
4909
4910         case RXD_OPAQUE_RING_JUMBO:
4911                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4912                 desc = &tpr->rx_jmb[dest_idx].std;
4913                 map = &tpr->rx_jmb_buffers[dest_idx];
4914                 skb_size = TG3_RX_JMB_MAP_SZ;
4915                 break;
4916
4917         default:
4918                 return -EINVAL;
4919         }
4920
4921         /* Do not overwrite any of the map or rp information
4922          * until we are sure we can commit to a new buffer.
4923          *
4924          * Callers depend upon this behavior and assume that
4925          * we leave everything unchanged if we fail.
4926          */
4927         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4928         if (skb == NULL)
4929                 return -ENOMEM;
4930
4931         skb_reserve(skb, tp->rx_offset);
4932
4933         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4934                                  PCI_DMA_FROMDEVICE);
4935         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4936                 dev_kfree_skb(skb);
4937                 return -EIO;
4938         }
4939
4940         map->skb = skb;
4941         dma_unmap_addr_set(map, mapping, mapping);
4942
4943         desc->addr_hi = ((u64)mapping >> 32);
4944         desc->addr_lo = ((u64)mapping & 0xffffffff);
4945
4946         return skb_size;
4947 }
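
/* Example (illustrative): a DMA address of 0x0000000123456780 is split
 * into addr_hi = 0x00000001 and addr_lo = 0x23456780 to fill the two
 * 32-bit halves of the hardware descriptor above.
 */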
4948
4949 /* We only need to copy over the address because the other
4950  * members of the RX descriptor are invariant.  See the notes
4951  * above tg3_alloc_rx_skb for full details.
4952  */
4953 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4954                            struct tg3_rx_prodring_set *dpr,
4955                            u32 opaque_key, int src_idx,
4956                            u32 dest_idx_unmasked)
4957 {
4958         struct tg3 *tp = tnapi->tp;
4959         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4960         struct ring_info *src_map, *dest_map;
4961         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4962         int dest_idx;
4963
4964         switch (opaque_key) {
4965         case RXD_OPAQUE_RING_STD:
4966                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4967                 dest_desc = &dpr->rx_std[dest_idx];
4968                 dest_map = &dpr->rx_std_buffers[dest_idx];
4969                 src_desc = &spr->rx_std[src_idx];
4970                 src_map = &spr->rx_std_buffers[src_idx];
4971                 break;
4972
4973         case RXD_OPAQUE_RING_JUMBO:
4974                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4975                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4976                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4977                 src_desc = &spr->rx_jmb[src_idx].std;
4978                 src_map = &spr->rx_jmb_buffers[src_idx];
4979                 break;
4980
4981         default:
4982                 return;
4983         }
4984
4985         dest_map->skb = src_map->skb;
4986         dma_unmap_addr_set(dest_map, mapping,
4987                            dma_unmap_addr(src_map, mapping));
4988         dest_desc->addr_hi = src_desc->addr_hi;
4989         dest_desc->addr_lo = src_desc->addr_lo;
4990
4991         /* Ensure that the update to the skb happens after the physical
4992          * addresses have been transferred to the new BD location.
4993          */
4994         smp_wmb();
4995
4996         src_map->skb = NULL;
4997 }
4998
4999 /* The RX ring scheme is composed of multiple rings which post fresh
5000  * buffers to the chip, and one special ring the chip uses to report
5001  * status back to the host.
5002  *
5003  * The special ring reports the status of received packets to the
5004  * host.  The chip does not write into the original descriptor the
5005  * RX buffer was obtained from.  The chip simply takes the original
5006  * descriptor as provided by the host, updates the status and length
5007  * field, then writes this into the next status ring entry.
5008  *
5009  * Each ring the host uses to post buffers to the chip is described
5010  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5011  * it is first placed into the on-chip RAM.  Once its length is known,
5012  * the chip walks down the TG3_BDINFO entries to select a ring: each
5013  * TG3_BDINFO specifies a MAXLEN field, and the first entry whose MAXLEN
5014  * accommodates the new packet's length is chosen.
5015  *
5016  * The "separate ring for rx status" scheme may sound queer, but it makes
5017  * sense from a cache coherency perspective.  If only the host writes
5018  * to the buffer post rings, and only the chip writes to the rx status
5019  * rings, then cache lines never move beyond shared-modified state.
5020  * If both the host and chip were to write into the same ring, cache line
5021  * eviction could occur since both entities want it in an exclusive state.
5022  */
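
/* Illustrative example (the MAXLEN values are hypothetical): with a
 * standard ring whose TG3_BDINFO MAXLEN is 1536 and a jumbo ring whose
 * MAXLEN is 9046, a 300-byte frame is matched against the standard
 * ring, while a 4000-byte frame selects the jumbo ring.
 */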
5023 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5024 {
5025         struct tg3 *tp = tnapi->tp;
5026         u32 work_mask, rx_std_posted = 0;
5027         u32 std_prod_idx, jmb_prod_idx;
5028         u32 sw_idx = tnapi->rx_rcb_ptr;
5029         u16 hw_idx;
5030         int received;
5031         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5032
5033         hw_idx = *(tnapi->rx_rcb_prod_idx);
5034         /*
5035          * We need to order the read of hw_idx and the read of
5036          * the opaque cookie.
5037          */
5038         rmb();
5039         work_mask = 0;
5040         received = 0;
5041         std_prod_idx = tpr->rx_std_prod_idx;
5042         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5043         while (sw_idx != hw_idx && budget > 0) {
5044                 struct ring_info *ri;
5045                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5046                 unsigned int len;
5047                 struct sk_buff *skb;
5048                 dma_addr_t dma_addr;
5049                 u32 opaque_key, desc_idx, *post_ptr;
5050
5051                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5052                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5053                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5054                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5055                         dma_addr = dma_unmap_addr(ri, mapping);
5056                         skb = ri->skb;
5057                         post_ptr = &std_prod_idx;
5058                         rx_std_posted++;
5059                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5060                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5061                         dma_addr = dma_unmap_addr(ri, mapping);
5062                         skb = ri->skb;
5063                         post_ptr = &jmb_prod_idx;
5064                 } else
5065                         goto next_pkt_nopost;
5066
5067                 work_mask |= opaque_key;
5068
5069                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5070                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5071                 drop_it:
5072                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5073                                        desc_idx, *post_ptr);
5074                 drop_it_no_recycle:
5075                         /* Other statistics are tracked by the card. */
5076                         tp->rx_dropped++;
5077                         goto next_pkt;
5078                 }
5079
5080                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5081                       ETH_FCS_LEN;
5082
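                /* Copy-break: a large packet keeps its original DMA
                 * buffer (a fresh replacement is posted to the ring),
                 * while a small packet is copied into a compact skb so
                 * the original buffer can be recycled in place.
                 */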
5083                 if (len > TG3_RX_COPY_THRESH(tp)) {
5084                         int skb_size;
5085
5086                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5087                                                     *post_ptr);
5088                         if (skb_size < 0)
5089                                 goto drop_it;
5090
5091                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5092                                          PCI_DMA_FROMDEVICE);
5093
5094                         /* Ensure that the update to the skb happens
5095                          * after the usage of the old DMA mapping.
5096                          */
5097                         smp_wmb();
5098
5099                         ri->skb = NULL;
5100
5101                         skb_put(skb, len);
5102                 } else {
5103                         struct sk_buff *copy_skb;
5104
5105                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5106                                        desc_idx, *post_ptr);
5107
5108                         copy_skb = netdev_alloc_skb(tp->dev, len +
5109                                                     TG3_RAW_IP_ALIGN);
5110                         if (copy_skb == NULL)
5111                                 goto drop_it_no_recycle;
5112
5113                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5114                         skb_put(copy_skb, len);
5115                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5116                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5117                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5118
5119                         /* We'll reuse the original ring buffer. */
5120                         skb = copy_skb;
5121                 }
5122
5123                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5124                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5125                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5126                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5127                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5128                 else
5129                         skb_checksum_none_assert(skb);
5130
5131                 skb->protocol = eth_type_trans(skb, tp->dev);
5132
5133                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5134                     skb->protocol != htons(ETH_P_8021Q)) {
5135                         dev_kfree_skb(skb);
5136                         goto drop_it_no_recycle;
5137                 }
5138
5139                 if (desc->type_flags & RXD_FLAG_VLAN &&
5140                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5141                         __vlan_hwaccel_put_tag(skb,
5142                                                desc->err_vlan & RXD_VLAN_MASK);
5143
5144                 napi_gro_receive(&tnapi->napi, skb);
5145
5146                 received++;
5147                 budget--;
5148
5149 next_pkt:
5150                 (*post_ptr)++;
5151
5152                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5153                         tpr->rx_std_prod_idx = std_prod_idx &
5154                                                tp->rx_std_ring_mask;
5155                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5156                                      tpr->rx_std_prod_idx);
5157                         work_mask &= ~RXD_OPAQUE_RING_STD;
5158                         rx_std_posted = 0;
5159                 }
5160 next_pkt_nopost:
5161                 sw_idx++;
5162                 sw_idx &= tp->rx_ret_ring_mask;
5163
5164                 /* Refresh hw_idx to see if there is new work */
5165                 if (sw_idx == hw_idx) {
5166                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5167                         rmb();
5168                 }
5169         }
5170
5171         /* ACK the status ring. */
5172         tnapi->rx_rcb_ptr = sw_idx;
5173         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5174
5175         /* Refill RX ring(s). */
5176         if (!tg3_flag(tp, ENABLE_RSS)) {
5177                 if (work_mask & RXD_OPAQUE_RING_STD) {
5178                         tpr->rx_std_prod_idx = std_prod_idx &
5179                                                tp->rx_std_ring_mask;
5180                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5181                                      tpr->rx_std_prod_idx);
5182                 }
5183                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5184                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5185                                                tp->rx_jmb_ring_mask;
5186                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5187                                      tpr->rx_jmb_prod_idx);
5188                 }
5189                 mmiowb();
5190         } else if (work_mask) {
5191                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5192                  * updated before the producer indices can be updated.
5193                  */
5194                 smp_wmb();
5195
5196                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5197                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5198
5199                 if (tnapi != &tp->napi[1])
5200                         napi_schedule(&tp->napi[1].napi);
5201         }
5202
5203         return received;
5204 }
5205
5206 static void tg3_poll_link(struct tg3 *tp)
5207 {
5208         /* handle link change and other phy events */
5209         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5210                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5211
5212                 if (sblk->status & SD_STATUS_LINK_CHG) {
5213                         sblk->status = SD_STATUS_UPDATED |
5214                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5215                         spin_lock(&tp->lock);
5216                         if (tg3_flag(tp, USE_PHYLIB)) {
5217                                 tw32_f(MAC_STATUS,
5218                                      (MAC_STATUS_SYNC_CHANGED |
5219                                       MAC_STATUS_CFG_CHANGED |
5220                                       MAC_STATUS_MI_COMPLETION |
5221                                       MAC_STATUS_LNKSTATE_CHANGED));
5222                                 udelay(40);
5223                         } else
5224                                 tg3_setup_phy(tp, 0);
5225                         spin_unlock(&tp->lock);
5226                 }
5227         }
5228 }
5229
5230 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5231                                 struct tg3_rx_prodring_set *dpr,
5232                                 struct tg3_rx_prodring_set *spr)
5233 {
5234         u32 si, di, cpycnt, src_prod_idx;
5235         int i, err = 0;
5236
5237         while (1) {
5238                 src_prod_idx = spr->rx_std_prod_idx;
5239
5240                 /* Make sure updates to the rx_std_buffers[] entries and the
5241                  * standard producer index are seen in the correct order.
5242                  */
5243                 smp_rmb();
5244
5245                 if (spr->rx_std_cons_idx == src_prod_idx)
5246                         break;
5247
5248                 if (spr->rx_std_cons_idx < src_prod_idx)
5249                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5250                 else
5251                         cpycnt = tp->rx_std_ring_mask + 1 -
5252                                  spr->rx_std_cons_idx;
5253
5254                 cpycnt = min(cpycnt,
5255                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5256
5257                 si = spr->rx_std_cons_idx;
5258                 di = dpr->rx_std_prod_idx;
5259
5260                 for (i = di; i < di + cpycnt; i++) {
5261                         if (dpr->rx_std_buffers[i].skb) {
5262                                 cpycnt = i - di;
5263                                 err = -ENOSPC;
5264                                 break;
5265                         }
5266                 }
5267
5268                 if (!cpycnt)
5269                         break;
5270
5271                 /* Ensure that updates to the rx_std_buffers ring and the
5272                  * shadowed hardware producer ring from tg3_recycle_skb() are
5273                  * ordered correctly WRT the skb check above.
5274                  */
5275                 smp_rmb();
5276
5277                 memcpy(&dpr->rx_std_buffers[di],
5278                        &spr->rx_std_buffers[si],
5279                        cpycnt * sizeof(struct ring_info));
5280
5281                 for (i = 0; i < cpycnt; i++, di++, si++) {
5282                         struct tg3_rx_buffer_desc *sbd, *dbd;
5283                         sbd = &spr->rx_std[si];
5284                         dbd = &dpr->rx_std[di];
5285                         dbd->addr_hi = sbd->addr_hi;
5286                         dbd->addr_lo = sbd->addr_lo;
5287                 }
5288
5289                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5290                                        tp->rx_std_ring_mask;
5291                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5292                                        tp->rx_std_ring_mask;
5293         }
5294
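        /* Repeat the same consumer-to-producer transfer for the jumbo
         * ring; the logic mirrors the standard-ring loop above.
         */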
5295         while (1) {
5296                 src_prod_idx = spr->rx_jmb_prod_idx;
5297
5298                 /* Make sure updates to the rx_jmb_buffers[] entries and
5299                  * the jumbo producer index are seen in the correct order.
5300                  */
5301                 smp_rmb();
5302
5303                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5304                         break;
5305
5306                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5307                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5308                 else
5309                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5310                                  spr->rx_jmb_cons_idx;
5311
5312                 cpycnt = min(cpycnt,
5313                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5314
5315                 si = spr->rx_jmb_cons_idx;
5316                 di = dpr->rx_jmb_prod_idx;
5317
5318                 for (i = di; i < di + cpycnt; i++) {
5319                         if (dpr->rx_jmb_buffers[i].skb) {
5320                                 cpycnt = i - di;
5321                                 err = -ENOSPC;
5322                                 break;
5323                         }
5324                 }
5325
5326                 if (!cpycnt)
5327                         break;
5328
5329                 /* Ensure that updates to the rx_jmb_buffers ring and the
5330                  * shadowed hardware producer ring from tg3_recycle_skb() are
5331                  * ordered correctly WRT the skb check above.
5332                  */
5333                 smp_rmb();
5334
5335                 memcpy(&dpr->rx_jmb_buffers[di],
5336                        &spr->rx_jmb_buffers[si],
5337                        cpycnt * sizeof(struct ring_info));
5338
5339                 for (i = 0; i < cpycnt; i++, di++, si++) {
5340                         struct tg3_rx_buffer_desc *sbd, *dbd;
5341                         sbd = &spr->rx_jmb[si].std;
5342                         dbd = &dpr->rx_jmb[di].std;
5343                         dbd->addr_hi = sbd->addr_hi;
5344                         dbd->addr_lo = sbd->addr_lo;
5345                 }
5346
5347                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5348                                        tp->rx_jmb_ring_mask;
5349                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5350                                        tp->rx_jmb_ring_mask;
5351         }
5352
5353         return err;
5354 }
5355
5356 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5357 {
5358         struct tg3 *tp = tnapi->tp;
5359
5360         /* run TX completion thread */
5361         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5362                 tg3_tx(tnapi);
5363                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5364                         return work_done;
5365         }
5366
5367         /* run RX thread, within the bounds set by NAPI.
5368          * All RX "locking" is done by ensuring outside
5369          * code synchronizes with tg3->napi.poll()
5370          */
5371         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5372                 work_done += tg3_rx(tnapi, budget - work_done);
5373
5374         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5375                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5376                 int i, err = 0;
5377                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5378                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5379
5380                 for (i = 1; i < tp->irq_cnt; i++)
5381                         err |= tg3_rx_prodring_xfer(tp, dpr,
5382                                                     &tp->napi[i].prodring);
5383
5384                 wmb();
5385
5386                 if (std_prod_idx != dpr->rx_std_prod_idx)
5387                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5388                                      dpr->rx_std_prod_idx);
5389
5390                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5391                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5392                                      dpr->rx_jmb_prod_idx);
5393
5394                 mmiowb();
5395
5396                 if (err)
5397                         tw32_f(HOSTCC_MODE, tp->coal_now);
5398         }
5399
5400         return work_done;
5401 }
5402
5403 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5404 {
5405         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5406         struct tg3 *tp = tnapi->tp;
5407         int work_done = 0;
5408         struct tg3_hw_status *sblk = tnapi->hw_status;
5409
5410         while (1) {
5411                 work_done = tg3_poll_work(tnapi, work_done, budget);
5412
5413                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5414                         goto tx_recovery;
5415
5416                 if (unlikely(work_done >= budget))
5417                         break;
5418
5419                 /* tnapi->last_tag is used when reenabling interrupts
5420                  * below to tell the hw how much work has been processed,
5421                  * so we must read it before checking for more work.
5422                  */
5423                 tnapi->last_tag = sblk->status_tag;
5424                 tnapi->last_irq_tag = tnapi->last_tag;
5425                 rmb();
5426
5427                 /* check for RX/TX work to do */
5428                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5429                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5430                         napi_complete(napi);
5431                         /* Reenable interrupts. */
5432                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5433                         mmiowb();
5434                         break;
5435                 }
5436         }
5437
5438         return work_done;
5439
5440 tx_recovery:
5441         /* work_done is guaranteed to be less than budget. */
5442         napi_complete(napi);
5443         schedule_work(&tp->reset_task);
5444         return work_done;
5445 }
5446
5447 static void tg3_process_error(struct tg3 *tp)
5448 {
5449         u32 val;
5450         bool real_error = false;
5451
5452         if (tg3_flag(tp, ERROR_PROCESSED))
5453                 return;
5454
5455         /* Check Flow Attention register */
5456         val = tr32(HOSTCC_FLOW_ATTN);
5457         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5458                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5459                 real_error = true;
5460         }
5461
5462         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5463                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5464                 real_error = true;
5465         }
5466
5467         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5468                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5469                 real_error = true;
5470         }
5471
5472         if (!real_error)
5473                 return;
5474
5475         tg3_dump_state(tp);
5476
5477         tg3_flag_set(tp, ERROR_PROCESSED);
5478         schedule_work(&tp->reset_task);
5479 }
5480
5481 static int tg3_poll(struct napi_struct *napi, int budget)
5482 {
5483         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5484         struct tg3 *tp = tnapi->tp;
5485         int work_done = 0;
5486         struct tg3_hw_status *sblk = tnapi->hw_status;
5487
5488         while (1) {
5489                 if (sblk->status & SD_STATUS_ERROR)
5490                         tg3_process_error(tp);
5491
5492                 tg3_poll_link(tp);
5493
5494                 work_done = tg3_poll_work(tnapi, work_done, budget);
5495
5496                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5497                         goto tx_recovery;
5498
5499                 if (unlikely(work_done >= budget))
5500                         break;
5501
5502                 if (tg3_flag(tp, TAGGED_STATUS)) {
5503                         /* tnapi->last_tag is used in tg3_int_reenable()
5504                          * below to tell the hw how much work has been
5505                          * processed, so read it before checking for more.
5506                          */
5507                         tnapi->last_tag = sblk->status_tag;
5508                         tnapi->last_irq_tag = tnapi->last_tag;
5509                         rmb();
5510                 } else
5511                         sblk->status &= ~SD_STATUS_UPDATED;
5512
5513                 if (likely(!tg3_has_work(tnapi))) {
5514                         napi_complete(napi);
5515                         tg3_int_reenable(tnapi);
5516                         break;
5517                 }
5518         }
5519
5520         return work_done;
5521
5522 tx_recovery:
5523         /* work_done is guaranteed to be less than budget. */
5524         napi_complete(napi);
5525         schedule_work(&tp->reset_task);
5526         return work_done;
5527 }
5528
5529 static void tg3_napi_disable(struct tg3 *tp)
5530 {
5531         int i;
5532
5533         for (i = tp->irq_cnt - 1; i >= 0; i--)
5534                 napi_disable(&tp->napi[i].napi);
5535 }
5536
5537 static void tg3_napi_enable(struct tg3 *tp)
5538 {
5539         int i;
5540
5541         for (i = 0; i < tp->irq_cnt; i++)
5542                 napi_enable(&tp->napi[i].napi);
5543 }
5544
5545 static void tg3_napi_init(struct tg3 *tp)
5546 {
5547         int i;
5548
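        /* The weight of 64 passed to netif_napi_add() is the
         * conventional NAPI budget: the maximum number of packets one
         * poll call may process before yielding to other devices.
         */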
5549         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5550         for (i = 1; i < tp->irq_cnt; i++)
5551                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5552 }
5553
5554 static void tg3_napi_fini(struct tg3 *tp)
5555 {
5556         int i;
5557
5558         for (i = 0; i < tp->irq_cnt; i++)
5559                 netif_napi_del(&tp->napi[i].napi);
5560 }
5561
5562 static inline void tg3_netif_stop(struct tg3 *tp)
5563 {
5564         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5565         tg3_napi_disable(tp);
5566         netif_tx_disable(tp->dev);
5567 }
5568
5569 static inline void tg3_netif_start(struct tg3 *tp)
5570 {
5571         /* NOTE: unconditional netif_tx_wake_all_queues is only
5572          * appropriate so long as all callers are assured to
5573          * have free tx slots (such as after tg3_init_hw)
5574          */
5575         netif_tx_wake_all_queues(tp->dev);
5576
5577         tg3_napi_enable(tp);
5578         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5579         tg3_enable_ints(tp);
5580 }
5581
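/* Flag the device as quiescing and wait for all in-flight interrupt
 * handlers to finish.  Invoked from tg3_full_lock() below with
 * tp->lock held, so handlers that start afterwards will observe
 * irq_sync via tg3_irq_sync() and bail out early.
 */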
5582 static void tg3_irq_quiesce(struct tg3 *tp)
5583 {
5584         int i;
5585
5586         BUG_ON(tp->irq_sync);
5587
5588         tp->irq_sync = 1;
5589         smp_mb();
5590
5591         for (i = 0; i < tp->irq_cnt; i++)
5592                 synchronize_irq(tp->napi[i].irq_vec);
5593 }
5594
5595 /* Fully shut down all tg3 driver activity elsewhere in the system.
5596  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
5597  * as well.  Most of the time, this is not necessary except when
5598  * shutting down the device.
5599  */
5600 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5601 {
5602         spin_lock_bh(&tp->lock);
5603         if (irq_sync)
5604                 tg3_irq_quiesce(tp);
5605 }
5606
5607 static inline void tg3_full_unlock(struct tg3 *tp)
5608 {
5609         spin_unlock_bh(&tp->lock);
5610 }
5611
5612 /* One-shot MSI handler - the chip automatically disables the interrupt
5613  * after sending the MSI, so the driver doesn't have to do it.
5614  */
5615 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5616 {
5617         struct tg3_napi *tnapi = dev_id;
5618         struct tg3 *tp = tnapi->tp;
5619
5620         prefetch(tnapi->hw_status);
5621         if (tnapi->rx_rcb)
5622                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5623
5624         if (likely(!tg3_irq_sync(tp)))
5625                 napi_schedule(&tnapi->napi);
5626
5627         return IRQ_HANDLED;
5628 }
5629
5630 /* MSI ISR - No need to check for interrupt sharing and no need to
5631  * flush status block and interrupt mailbox. PCI ordering rules
5632  * guarantee that MSI will arrive after the status block.
5633  */
5634 static irqreturn_t tg3_msi(int irq, void *dev_id)
5635 {
5636         struct tg3_napi *tnapi = dev_id;
5637         struct tg3 *tp = tnapi->tp;
5638
5639         prefetch(tnapi->hw_status);
5640         if (tnapi->rx_rcb)
5641                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5642         /*
5643          * Writing any value to intr-mbox-0 clears PCI INTA# and
5644          * chip-internal interrupt pending events.
5645          * Writing non-zero to intr-mbox-0 additionally tells the
5646          * NIC to stop sending us irqs, engaging "in-intr-handler"
5647          * event coalescing.
5648          */
5649         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5650         if (likely(!tg3_irq_sync(tp)))
5651                 napi_schedule(&tnapi->napi);
5652
5653         return IRQ_RETVAL(1);
5654 }
5655
5656 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5657 {
5658         struct tg3_napi *tnapi = dev_id;
5659         struct tg3 *tp = tnapi->tp;
5660         struct tg3_hw_status *sblk = tnapi->hw_status;
5661         unsigned int handled = 1;
5662
5663         /* In INTx mode, it is possible for the interrupt to arrive at
5664          * the CPU before the status block that was posted prior to it.
5665          * Reading the PCI State register will confirm whether the
5666          * interrupt is ours and will flush the status block.
5667          */
5668         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5669                 if (tg3_flag(tp, CHIP_RESETTING) ||
5670                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5671                         handled = 0;
5672                         goto out;
5673                 }
5674         }
5675
5676         /*
5677          * Writing any value to intr-mbox-0 clears PCI INTA# and
5678          * chip-internal interrupt pending events.
5679          * Writing non-zero to intr-mbox-0 additionally tells the
5680          * NIC to stop sending us irqs, engaging "in-intr-handler"
5681          * event coalescing.
5682          *
5683          * Flush the mailbox to de-assert the IRQ immediately to prevent
5684          * spurious interrupts.  The flush impacts performance but
5685          * excessive spurious interrupts can be worse in some cases.
5686          */
5687         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5688         if (tg3_irq_sync(tp))
5689                 goto out;
5690         sblk->status &= ~SD_STATUS_UPDATED;
5691         if (likely(tg3_has_work(tnapi))) {
5692                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5693                 napi_schedule(&tnapi->napi);
5694         } else {
5695                 /* No work; a shared interrupt, perhaps?  Re-enable
5696                  * interrupts and flush that PCI write.
5697                  */
5698                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5699                                0x00000000);
5700         }
5701 out:
5702         return IRQ_RETVAL(handled);
5703 }
5704
5705 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5706 {
5707         struct tg3_napi *tnapi = dev_id;
5708         struct tg3 *tp = tnapi->tp;
5709         struct tg3_hw_status *sblk = tnapi->hw_status;
5710         unsigned int handled = 1;
5711
5712         /* In INTx mode, it is possible for the interrupt to arrive at
5713          * the CPU before the status block that was posted prior to it.
5714          * Reading the PCI State register will confirm whether the
5715          * interrupt is ours and will flush the status block.
5716          */
5717         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5718                 if (tg3_flag(tp, CHIP_RESETTING) ||
5719                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5720                         handled = 0;
5721                         goto out;
5722                 }
5723         }
5724
5725         /*
5726          * Writing any value to intr-mbox-0 clears PCI INTA# and
5727          * chip-internal interrupt pending events.
5728          * Writing non-zero to intr-mbox-0 additionally tells the
5729          * NIC to stop sending us irqs, engaging "in-intr-handler"
5730          * event coalescing.
5731          *
5732          * Flush the mailbox to de-assert the IRQ immediately to prevent
5733          * spurious interrupts.  The flush impacts performance but
5734          * excessive spurious interrupts can be worse in some cases.
5735          */
5736         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5737
5738         /*
5739          * In a shared interrupt configuration, sometimes other devices'
5740          * interrupts will scream.  We record the current status tag here
5741          * so that the above check can report that the screaming interrupts
5742          * are unhandled.  Eventually they will be silenced.
5743          */
5744         tnapi->last_irq_tag = sblk->status_tag;
5745
5746         if (tg3_irq_sync(tp))
5747                 goto out;
5748
5749         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5750
5751         napi_schedule(&tnapi->napi);
5752
5753 out:
5754         return IRQ_RETVAL(handled);
5755 }
5756
5757 /* ISR for interrupt test */
5758 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5759 {
5760         struct tg3_napi *tnapi = dev_id;
5761         struct tg3 *tp = tnapi->tp;
5762         struct tg3_hw_status *sblk = tnapi->hw_status;
5763
5764         if ((sblk->status & SD_STATUS_UPDATED) ||
5765             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5766                 tg3_disable_ints(tp);
5767                 return IRQ_RETVAL(1);
5768         }
5769         return IRQ_RETVAL(0);
5770 }
5771
5772 static int tg3_init_hw(struct tg3 *, int);
5773 static int tg3_halt(struct tg3 *, int, int);
5774
5775 /* Restart hardware after configuration changes, self-test, etc.
5776  * Invoked with tp->lock held.
5777  */
5778 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5779         __releases(tp->lock)
5780         __acquires(tp->lock)
5781 {
5782         int err;
5783
5784         err = tg3_init_hw(tp, reset_phy);
5785         if (err) {
5786                 netdev_err(tp->dev,
5787                            "Failed to re-initialize device, aborting\n");
5788                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5789                 tg3_full_unlock(tp);
5790                 del_timer_sync(&tp->timer);
5791                 tp->irq_sync = 0;
5792                 tg3_napi_enable(tp);
5793                 dev_close(tp->dev);
5794                 tg3_full_lock(tp, 0);
5795         }
5796         return err;
5797 }
5798
5799 #ifdef CONFIG_NET_POLL_CONTROLLER
5800 static void tg3_poll_controller(struct net_device *dev)
5801 {
5802         int i;
5803         struct tg3 *tp = netdev_priv(dev);
5804
5805         for (i = 0; i < tp->irq_cnt; i++)
5806                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5807 }
5808 #endif
5809
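/* Deferred reset handler.  Scheduled on tp->reset_task from the error
 * paths (tg3_process_error(), the tx recovery path in tg3_poll() and
 * tg3_tx_timeout()), it runs in process context and performs a full
 * halt and re-initialization of the chip.
 */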
5810 static void tg3_reset_task(struct work_struct *work)
5811 {
5812         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5813         int err;
5814         unsigned int restart_timer;
5815
5816         tg3_full_lock(tp, 0);
5817
5818         if (!netif_running(tp->dev)) {
5819                 tg3_full_unlock(tp);
5820                 return;
5821         }
5822
5823         tg3_full_unlock(tp);
5824
5825         tg3_phy_stop(tp);
5826
5827         tg3_netif_stop(tp);
5828
5829         tg3_full_lock(tp, 1);
5830
5831         restart_timer = tg3_flag(tp, RESTART_TIMER);
5832         tg3_flag_clear(tp, RESTART_TIMER);
5833
5834         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5835                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5836                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5837                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5838                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5839         }
5840
5841         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5842         err = tg3_init_hw(tp, 1);
5843         if (err)
5844                 goto out;
5845
5846         tg3_netif_start(tp);
5847
5848         if (restart_timer)
5849                 mod_timer(&tp->timer, jiffies + 1);
5850
5851 out:
5852         tg3_full_unlock(tp);
5853
5854         if (!err)
5855                 tg3_phy_start(tp);
5856 }
5857
5858 static void tg3_tx_timeout(struct net_device *dev)
5859 {
5860         struct tg3 *tp = netdev_priv(dev);
5861
5862         if (netif_msg_tx_err(tp)) {
5863                 netdev_err(dev, "transmit timed out, resetting\n");
5864                 tg3_dump_state(tp);
5865         }
5866
5867         schedule_work(&tp->reset_task);
5868 }
5869
5870 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5871 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5872 {
5873         u32 base = (u32) mapping & 0xffffffff;
5874
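        /* The first check cheaply rejects buffers ending well below a
         * boundary (0xffffdcc0 leaves roughly 9KB of headroom,
         * presumably sized for a maximum jumbo frame); the 32-bit wrap
         * of base + len + 8 then detects an actual crossing.
         */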
5875         return (base > 0xffffdcc0) && (base + len + 8 < base);
5876 }
5877
5878 /* Test for DMA addresses > 40-bit */
5879 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5880                                           int len)
5881 {
5882 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5883         if (tg3_flag(tp, 40BIT_DMA_BUG))
5884                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5885         return 0;
5886 #else
5887         return 0;
5888 #endif
5889 }
5890
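/* Fill in one tx buffer descriptor.  @mss_and_is_end packs the
 * "last descriptor of this packet" flag in bit 0 and the MSS in the
 * remaining upper bits; a VLAN tag, when TXD_FLAG_VLAN is set, is
 * carried in the upper 16 bits of @flags.
 */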
5891 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5892                         dma_addr_t mapping, int len, u32 flags,
5893                         u32 mss_and_is_end)
5894 {
5895         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5896         int is_end = (mss_and_is_end & 0x1);
5897         u32 mss = (mss_and_is_end >> 1);
5898         u32 vlan_tag = 0;
5899
5900         if (is_end)
5901                 flags |= TXD_FLAG_END;
5902         if (flags & TXD_FLAG_VLAN) {
5903                 vlan_tag = flags >> 16;
5904                 flags &= 0xffff;
5905         }
5906         vlan_tag |= (mss << TXD_MSS_SHIFT);
5907
5908         txd->addr_hi = ((u64) mapping >> 32);
5909         txd->addr_lo = ((u64) mapping & 0xffffffff);
5910         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5911         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5912 }
5913
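/* Undo the DMA mappings of a partially queued skb: the linear head
 * mapped at tx_prod plus the first @last fragment mappings.
 */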
5914 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5915                                 struct sk_buff *skb, int last)
5916 {
5917         int i;
5918         u32 entry = tnapi->tx_prod;
5919         struct ring_info *txb = &tnapi->tx_buffers[entry];
5920
5921         pci_unmap_single(tnapi->tp->pdev,
5922                          dma_unmap_addr(txb, mapping),
5923                          skb_headlen(skb),
5924                          PCI_DMA_TODEVICE);
5925         for (i = 0; i < last; i++) {
5926                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5927
5928                 entry = NEXT_TX(entry);
5929                 txb = &tnapi->tx_buffers[entry];
5930
5931                 pci_unmap_page(tnapi->tp->pdev,
5932                                dma_unmap_addr(txb, mapping),
5933                                frag->size, PCI_DMA_TODEVICE);
5934         }
5935 }
5936
5937 /* Workaround 4GB and 40-bit hardware DMA bugs. */
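/* The workaround copies the skb into a fresh linear buffer (with extra
 * headroom on 5701 so the data ends up 4-byte aligned), re-maps it and
 * re-checks the new mapping against the 4GB boundary, dropping the
 * packet if a safe copy cannot be produced.
 */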
5938 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5939                                        struct sk_buff *skb,
5940                                        u32 base_flags, u32 mss)
5941 {
5942         struct tg3 *tp = tnapi->tp;
5943         struct sk_buff *new_skb;
5944         dma_addr_t new_addr = 0;
5945         u32 entry = tnapi->tx_prod;
5946         int ret = 0;
5947
5948         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5949                 new_skb = skb_copy(skb, GFP_ATOMIC);
5950         else {
5951                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5952
5953                 new_skb = skb_copy_expand(skb,
5954                                           skb_headroom(skb) + more_headroom,
5955                                           skb_tailroom(skb), GFP_ATOMIC);
5956         }
5957
5958         if (!new_skb) {
5959                 ret = -1;
5960         } else {
5961                 /* New SKB is guaranteed to be linear. */
5962                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5963                                           PCI_DMA_TODEVICE);
5964                 /* Make sure the mapping succeeded */
5965                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5966                         ret = -1;
5967                         dev_kfree_skb(new_skb);
5968
5969                 /* Make sure new skb does not cross any 4G boundaries.
5970                  * Drop the packet if it does.
5971                  */
5972                 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5973                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5974                                          PCI_DMA_TODEVICE);
5975                         ret = -1;
5976                         dev_kfree_skb(new_skb);
5977                 } else {
5978                         tnapi->tx_buffers[entry].skb = new_skb;
5979                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5980                                            mapping, new_addr);
5981
5982                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5983                                     base_flags, 1 | (mss << 1));
5984                 }
5985         }
5986
5987         dev_kfree_skb(skb);
5988
5989         return ret;
5990 }
5991
5992 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5993
5994 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5995  * TSO header is greater than 80 bytes.
5996  */
5997 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5998 {
5999         struct sk_buff *segs, *nskb;
6000         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
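        /* In the worst case each segment needs a descriptor for its
         * linear header plus up to two page fragments, which is
         * presumably where the factor of three comes from.
         */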
6001
6002         /* Estimate the number of fragments in the worst case */
6003         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6004                 netif_stop_queue(tp->dev);
6005
6006                 /* netif_tx_stop_queue() must be done before checking
6007                  * tx index in tg3_tx_avail() below, because in
6008                  * tg3_tx(), we update tx index before checking for
6009                  * netif_tx_queue_stopped().
6010                  */
6011                 smp_mb();
6012                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6013                         return NETDEV_TX_BUSY;
6014
6015                 netif_wake_queue(tp->dev);
6016         }
6017
6018         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6019         if (IS_ERR(segs))
6020                 goto tg3_tso_bug_end;
6021
6022         do {
6023                 nskb = segs;
6024                 segs = segs->next;
6025                 nskb->next = NULL;
6026                 tg3_start_xmit(nskb, tp->dev);
6027         } while (segs);
6028
6029 tg3_tso_bug_end:
6030         dev_kfree_skb(skb);
6031
6032         return NETDEV_TX_OK;
6033 }
6034
6035 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6036  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6037  */
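/* Outline: compute checksum/TSO flags, DMA-map the linear head and
 * every fragment, run the short-DMA/4GB/40-bit hwbug tests on each
 * mapping, fall back to tigon3_dma_hwbug_workaround() if any test
 * trips, and finally publish the new producer index to the chip's
 * tx mailbox.
 */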
6038 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6039 {
6040         struct tg3 *tp = netdev_priv(dev);
6041         u32 len, entry, base_flags, mss;
6042         int i = -1, would_hit_hwbug;
6043         dma_addr_t mapping;
6044         struct tg3_napi *tnapi;
6045         struct netdev_queue *txq;
6046         unsigned int last;
6047
6048         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6049         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6050         if (tg3_flag(tp, ENABLE_TSS))
6051                 tnapi++;
6052
6053         /* We are running in a BH-disabled context with netif_tx_lock
6054          * held, and TX reclaim runs via tp->napi.poll inside of a software
6055          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6056          * no IRQ context deadlocks to worry about either.  Rejoice!
6057          */
6058         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6059                 if (!netif_tx_queue_stopped(txq)) {
6060                         netif_tx_stop_queue(txq);
6061
6062                         /* This is a hard error, log it. */
6063                         netdev_err(dev,
6064                                    "BUG! Tx Ring full when queue awake!\n");
6065                 }
6066                 return NETDEV_TX_BUSY;
6067         }
6068
6069         entry = tnapi->tx_prod;
6070         base_flags = 0;
6071         if (skb->ip_summed == CHECKSUM_PARTIAL)
6072                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6073
6074         mss = skb_shinfo(skb)->gso_size;
6075         if (mss) {
6076                 struct iphdr *iph;
6077                 u32 tcp_opt_len, hdr_len;
6078
6079                 if (skb_header_cloned(skb) &&
6080                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6081                         dev_kfree_skb(skb);
6082                         goto out_unlock;
6083                 }
6084
6085                 iph = ip_hdr(skb);
6086                 tcp_opt_len = tcp_optlen(skb);
6087
6088                 if (skb_is_gso_v6(skb)) {
6089                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6090                 } else {
6091                         u32 ip_tcp_len;
6092
6093                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6094                         hdr_len = ip_tcp_len + tcp_opt_len;
6095
6096                         iph->check = 0;
6097                         iph->tot_len = htons(mss + hdr_len);
6098                 }
6099
6100                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6101                     tg3_flag(tp, TSO_BUG))
6102                         return tg3_tso_bug(tp, skb);
6103
6104                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6105                                TXD_FLAG_CPU_POST_DMA);
6106
6107                 if (tg3_flag(tp, HW_TSO_1) ||
6108                     tg3_flag(tp, HW_TSO_2) ||
6109                     tg3_flag(tp, HW_TSO_3)) {
6110                         tcp_hdr(skb)->check = 0;
6111                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6112                 } else
6113                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6114                                                                  iph->daddr, 0,
6115                                                                  IPPROTO_TCP,
6116                                                                  0);
6117
6118                 if (tg3_flag(tp, HW_TSO_3)) {
6119                         mss |= (hdr_len & 0xc) << 12;
6120                         if (hdr_len & 0x10)
6121                                 base_flags |= 0x00000010;
6122                         base_flags |= (hdr_len & 0x3e0) << 5;
6123                 } else if (tg3_flag(tp, HW_TSO_2))
6124                         mss |= hdr_len << 9;
6125                 else if (tg3_flag(tp, HW_TSO_1) ||
6126                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6127                         if (tcp_opt_len || iph->ihl > 5) {
6128                                 int tsflags;
6129
6130                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6131                                 mss |= (tsflags << 11);
6132                         }
6133                 } else {
6134                         if (tcp_opt_len || iph->ihl > 5) {
6135                                 int tsflags;
6136
6137                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6138                                 base_flags |= tsflags << 12;
6139                         }
6140                 }
6141         }
6142
6143         if (vlan_tx_tag_present(skb))
6144                 base_flags |= (TXD_FLAG_VLAN |
6145                                (vlan_tx_tag_get(skb) << 16));
6146
6147         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6148             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6149                 base_flags |= TXD_FLAG_JMB_PKT;
6150
6151         len = skb_headlen(skb);
6152
6153         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6154         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6155                 dev_kfree_skb(skb);
6156                 goto out_unlock;
6157         }
6158
6159         tnapi->tx_buffers[entry].skb = skb;
6160         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6161
6162         would_hit_hwbug = 0;
6163
6164         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6165                 would_hit_hwbug = 1;
6166
6167         if (tg3_4g_overflow_test(mapping, len))
6168                 would_hit_hwbug = 1;
6169
6170         if (tg3_40bit_overflow_test(tp, mapping, len))
6171                 would_hit_hwbug = 1;
6172
6173         if (tg3_flag(tp, 5701_DMA_BUG))
6174                 would_hit_hwbug = 1;
6175
6176         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6177                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6178
6179         entry = NEXT_TX(entry);
6180
6181         /* Now loop through additional data fragments, and queue them. */
6182         if (skb_shinfo(skb)->nr_frags > 0) {
6183                 last = skb_shinfo(skb)->nr_frags - 1;
6184                 for (i = 0; i <= last; i++) {
6185                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6186
6187                         len = frag->size;
6188                         mapping = pci_map_page(tp->pdev,
6189                                                frag->page,
6190                                                frag->page_offset,
6191                                                len, PCI_DMA_TODEVICE);
6192
6193                         tnapi->tx_buffers[entry].skb = NULL;
6194                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6195                                            mapping);
6196                         if (pci_dma_mapping_error(tp->pdev, mapping))
6197                                 goto dma_error;
6198
6199                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6200                             len <= 8)
6201                                 would_hit_hwbug = 1;
6202
6203                         if (tg3_4g_overflow_test(mapping, len))
6204                                 would_hit_hwbug = 1;
6205
6206                         if (tg3_40bit_overflow_test(tp, mapping, len))
6207                                 would_hit_hwbug = 1;
6208
6209                         if (tg3_flag(tp, HW_TSO_1) ||
6210                             tg3_flag(tp, HW_TSO_2) ||
6211                             tg3_flag(tp, HW_TSO_3))
6212                                 tg3_set_txd(tnapi, entry, mapping, len,
6213                                             base_flags, (i == last)|(mss << 1));
6214                         else
6215                                 tg3_set_txd(tnapi, entry, mapping, len,
6216                                             base_flags, (i == last));
6217
6218                         entry = NEXT_TX(entry);
6219                 }
6220         }
6221
6222         if (would_hit_hwbug) {
6223                 tg3_skb_error_unmap(tnapi, skb, i);
6224
6225                 /* If the workaround fails due to memory/mapping
6226                  * failure, silently drop this packet.
6227                  */
6228                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6229                         goto out_unlock;
6230
6231                 entry = NEXT_TX(tnapi->tx_prod);
6232         }
6233
6234         skb_tx_timestamp(skb);
6235
6236         /* Packets are ready, update Tx producer idx local and on card. */
6237         tw32_tx_mbox(tnapi->prodmbox, entry);
6238
6239         tnapi->tx_prod = entry;
6240         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6241                 netif_tx_stop_queue(txq);
6242
6243                 /* netif_tx_stop_queue() must be done before checking
6244                  * tx index in tg3_tx_avail() below, because in
6245                  * tg3_tx(), we update tx index before checking for
6246                  * netif_tx_queue_stopped().
6247                  */
6248                 smp_mb();
6249                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6250                         netif_tx_wake_queue(txq);
6251         }
6252
6253 out_unlock:
6254         mmiowb();
6255
6256         return NETDEV_TX_OK;
6257
6258 dma_error:
6259         tg3_skb_error_unmap(tnapi, skb, i);
6260         dev_kfree_skb(skb);
6261         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6262         return NETDEV_TX_OK;
6263 }
6264
6265 static void tg3_set_loopback(struct net_device *dev, u32 features)
6266 {
6267         struct tg3 *tp = netdev_priv(dev);
6268
6269         if (features & NETIF_F_LOOPBACK) {
6270                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6271                         return;
6272
6273                 /*
6274                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6275                  * loopback mode if Half-Duplex mode was negotiated earlier.
6276                  */
6277                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6278
6279                 /* Enable internal MAC loopback mode */
6280                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6281                 spin_lock_bh(&tp->lock);
6282                 tw32(MAC_MODE, tp->mac_mode);
6283                 netif_carrier_on(tp->dev);
6284                 spin_unlock_bh(&tp->lock);
6285                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6286         } else {
6287                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6288                         return;
6289
6290                 /* Disable internal MAC loopback mode */
6291                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6292                 spin_lock_bh(&tp->lock);
6293                 tw32(MAC_MODE, tp->mac_mode);
6294                 /* Force link status check */
6295                 tg3_setup_phy(tp, 1);
6296                 spin_unlock_bh(&tp->lock);
6297                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6298         }
6299 }
6300
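/* ndo_fix_features hook: 5780-class chips cannot do TSO together with
 * jumbo frames, so mask off the TSO feature bits whenever the MTU
 * exceeds the standard Ethernet payload size.
 */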
6301 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6302 {
6303         struct tg3 *tp = netdev_priv(dev);
6304
6305         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6306                 features &= ~NETIF_F_ALL_TSO;
6307
6308         return features;
6309 }
6310
6311 static int tg3_set_features(struct net_device *dev, u32 features)
6312 {
6313         u32 changed = dev->features ^ features;
6314
6315         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6316                 tg3_set_loopback(dev, features);
6317
6318         return 0;
6319 }
6320
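/* Apply MTU-dependent flags: a jumbo MTU enables the jumbo producer
 * ring, except on 5780-class chips where jumbo frames instead trade
 * away TSO capability (see tg3_fix_features() above).
 */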
6321 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6322                                int new_mtu)
6323 {
6324         dev->mtu = new_mtu;
6325
6326         if (new_mtu > ETH_DATA_LEN) {
6327                 if (tg3_flag(tp, 5780_CLASS)) {
6328                         netdev_update_features(dev);
6329                         tg3_flag_clear(tp, TSO_CAPABLE);
6330                 } else {
6331                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6332                 }
6333         } else {
6334                 if (tg3_flag(tp, 5780_CLASS)) {
6335                         tg3_flag_set(tp, TSO_CAPABLE);
6336                         netdev_update_features(dev);
6337                 }
6338                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6339         }
6340 }
6341
6342 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6343 {
6344         struct tg3 *tp = netdev_priv(dev);
6345         int err;
6346
6347         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6348                 return -EINVAL;
6349
6350         if (!netif_running(dev)) {
6351                 /* We'll just catch it later when the
6352                  * device is brought up.
6353                  */
6354                 tg3_set_mtu(dev, tp, new_mtu);
6355                 return 0;
6356         }
6357
6358         tg3_phy_stop(tp);
6359
6360         tg3_netif_stop(tp);
6361
6362         tg3_full_lock(tp, 1);
6363
6364         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6365
6366         tg3_set_mtu(dev, tp, new_mtu);
6367
6368         err = tg3_restart_hw(tp, 0);
6369
6370         if (!err)
6371                 tg3_netif_start(tp);
6372
6373         tg3_full_unlock(tp);
6374
6375         if (!err)
6376                 tg3_phy_start(tp);
6377
6378         return err;
6379 }
6380
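/* Release every rx buffer still owned by a producer ring set.  For the
 * per-vector rings only the consumer..producer window is populated;
 * the default ring set (napi[0]) is freed in full.
 */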
6381 static void tg3_rx_prodring_free(struct tg3 *tp,
6382                                  struct tg3_rx_prodring_set *tpr)
6383 {
6384         int i;
6385
6386         if (tpr != &tp->napi[0].prodring) {
6387                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6388                      i = (i + 1) & tp->rx_std_ring_mask)
6389                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6390                                         tp->rx_pkt_map_sz);
6391
6392                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6393                         for (i = tpr->rx_jmb_cons_idx;
6394                              i != tpr->rx_jmb_prod_idx;
6395                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6396                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6397                                                 TG3_RX_JMB_MAP_SZ);
6398                         }
6399                 }
6400
6401                 return;
6402         }
6403
6404         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6405                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6406                                 tp->rx_pkt_map_sz);
6407
6408         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6409                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6410                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6411                                         TG3_RX_JMB_MAP_SZ);
6412         }
6413 }
6414
6415 /* Initialize rx rings for packet processing.
6416  *
6417  * The chip has been shut down and the driver detached from
6418  * the networking stack, so no interrupts or new tx packets will
6419  * end up in the driver.  tp->{tx,}lock are held and thus
6420  * we may not sleep.
6421  */
6422 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6423                                  struct tg3_rx_prodring_set *tpr)
6424 {
6425         u32 i, rx_pkt_dma_sz;
6426
6427         tpr->rx_std_cons_idx = 0;
6428         tpr->rx_std_prod_idx = 0;
6429         tpr->rx_jmb_cons_idx = 0;
6430         tpr->rx_jmb_prod_idx = 0;
6431
6432         if (tpr != &tp->napi[0].prodring) {
6433                 memset(&tpr->rx_std_buffers[0], 0,
6434                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6435                 if (tpr->rx_jmb_buffers)
6436                         memset(&tpr->rx_jmb_buffers[0], 0,
6437                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6438                 goto done;
6439         }
6440
6441         /* Zero out all descriptors. */
6442         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6443
6444         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6445         if (tg3_flag(tp, 5780_CLASS) &&
6446             tp->dev->mtu > ETH_DATA_LEN)
6447                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6448         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6449
6450         /* Initialize invariants of the rings; we only set this
6451          * stuff once.  This works because the card does not
6452          * write into the rx buffer posting rings.
6453          */
6454         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6455                 struct tg3_rx_buffer_desc *rxd;
6456
6457                 rxd = &tpr->rx_std[i];
6458                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6459                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6460                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6461                                (i << RXD_OPAQUE_INDEX_SHIFT));
6462         }
6463
6464         /* Now allocate fresh SKBs for each rx ring. */
6465         for (i = 0; i < tp->rx_pending; i++) {
6466                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6467                         netdev_warn(tp->dev,
6468                                     "Using a smaller RX standard ring. Only "
6469                                     "%d out of %d buffers were allocated "
6470                                     "successfully\n", i, tp->rx_pending);
6471                         if (i == 0)
6472                                 goto initfail;
6473                         tp->rx_pending = i;
6474                         break;
6475                 }
6476         }
6477
6478         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6479                 goto done;
6480
6481         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6482
6483         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6484                 goto done;
6485
6486         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6487                 struct tg3_rx_buffer_desc *rxd;
6488
6489                 rxd = &tpr->rx_jmb[i].std;
6490                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6491                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6492                                   RXD_FLAG_JUMBO;
6493                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6494                        (i << RXD_OPAQUE_INDEX_SHIFT));
6495         }
6496
6497         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6498                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6499                         netdev_warn(tp->dev,
6500                                     "Using a smaller RX jumbo ring. Only %d "
6501                                     "out of %d buffers were allocated "
6502                                     "successfully\n", i, tp->rx_jumbo_pending);
6503                         if (i == 0)
6504                                 goto initfail;
6505                         tp->rx_jumbo_pending = i;
6506                         break;
6507                 }
6508         }
6509
6510 done:
6511         return 0;
6512
6513 initfail:
6514         tg3_rx_prodring_free(tp, tpr);
6515         return -ENOMEM;
6516 }
6517
6518 static void tg3_rx_prodring_fini(struct tg3 *tp,
6519                                  struct tg3_rx_prodring_set *tpr)
6520 {
6521         kfree(tpr->rx_std_buffers);
6522         tpr->rx_std_buffers = NULL;
6523         kfree(tpr->rx_jmb_buffers);
6524         tpr->rx_jmb_buffers = NULL;
6525         if (tpr->rx_std) {
6526                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6527                                   tpr->rx_std, tpr->rx_std_mapping);
6528                 tpr->rx_std = NULL;
6529         }
6530         if (tpr->rx_jmb) {
6531                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6532                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6533                 tpr->rx_jmb = NULL;
6534         }
6535 }
6536
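/* Allocate the host-side buffer bookkeeping arrays and DMA-coherent
 * rx producer descriptor rings; the jumbo ring is only allocated on
 * jumbo-capable chips that are not 5780-class.
 */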
6537 static int tg3_rx_prodring_init(struct tg3 *tp,
6538                                 struct tg3_rx_prodring_set *tpr)
6539 {
6540         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6541                                       GFP_KERNEL);
6542         if (!tpr->rx_std_buffers)
6543                 return -ENOMEM;
6544
6545         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6546                                          TG3_RX_STD_RING_BYTES(tp),
6547                                          &tpr->rx_std_mapping,
6548                                          GFP_KERNEL);
6549         if (!tpr->rx_std)
6550                 goto err_out;
6551
6552         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6553                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6554                                               GFP_KERNEL);
6555                 if (!tpr->rx_jmb_buffers)
6556                         goto err_out;
6557
6558                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6559                                                  TG3_RX_JMB_RING_BYTES(tp),
6560                                                  &tpr->rx_jmb_mapping,
6561                                                  GFP_KERNEL);
6562                 if (!tpr->rx_jmb)
6563                         goto err_out;
6564         }
6565
6566         return 0;
6567
6568 err_out:
6569         tg3_rx_prodring_fini(tp, tpr);
6570         return -ENOMEM;
6571 }
6572
6573 /* Free up pending packets in all rx/tx rings.
6574  *
6575  * The chip has been shut down and the driver detached from
6576  * the networking stack, so no interrupts or new tx packets will
6577  * end up in the driver.  tp->{tx,}lock is not held and we are not
6578  * in an interrupt context and thus may sleep.
6579  */
6580 static void tg3_free_rings(struct tg3 *tp)
6581 {
6582         int i, j;
6583
6584         for (j = 0; j < tp->irq_cnt; j++) {
6585                 struct tg3_napi *tnapi = &tp->napi[j];
6586
6587                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6588
6589                 if (!tnapi->tx_buffers)
6590                         continue;
6591
6592                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6593                         struct ring_info *txp;
6594                         struct sk_buff *skb;
6595                         unsigned int k;
6596
6597                         txp = &tnapi->tx_buffers[i];
6598                         skb = txp->skb;
6599
6600                         if (skb == NULL) {
6601                                 i++;
6602                                 continue;
6603                         }
6604
6605                         pci_unmap_single(tp->pdev,
6606                                          dma_unmap_addr(txp, mapping),
6607                                          skb_headlen(skb),
6608                                          PCI_DMA_TODEVICE);
6609                         txp->skb = NULL;
6610
6611                         i++;
6612
6613                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6614                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6615                                 pci_unmap_page(tp->pdev,
6616                                                dma_unmap_addr(txp, mapping),
6617                                                skb_shinfo(skb)->frags[k].size,
6618                                                PCI_DMA_TODEVICE);
6619                                 i++;
6620                         }
6621
6622                         dev_kfree_skb_any(skb);
6623                 }
6624         }
6625 }
6626
6627 /* Initialize tx/rx rings for packet processing.
6628  *
6629  * The chip has been shut down and the driver detached from
6630  * the networking stack, so no interrupts or new tx packets will
6631  * end up in the driver.  tp->{tx,}lock are held and thus
6632  * we may not sleep.
6633  */
6634 static int tg3_init_rings(struct tg3 *tp)
6635 {
6636         int i;
6637
6638         /* Free up all the SKBs. */
6639         tg3_free_rings(tp);
6640
6641         for (i = 0; i < tp->irq_cnt; i++) {
6642                 struct tg3_napi *tnapi = &tp->napi[i];
6643
6644                 tnapi->last_tag = 0;
6645                 tnapi->last_irq_tag = 0;
6646                 tnapi->hw_status->status = 0;
6647                 tnapi->hw_status->status_tag = 0;
6648                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6649
6650                 tnapi->tx_prod = 0;
6651                 tnapi->tx_cons = 0;
6652                 if (tnapi->tx_ring)
6653                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6654
6655                 tnapi->rx_rcb_ptr = 0;
6656                 if (tnapi->rx_rcb)
6657                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6658
6659                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6660                         tg3_free_rings(tp);
6661                         return -ENOMEM;
6662                 }
6663         }
6664
6665         return 0;
6666 }
6667
6668 /*
6669  * Must not be invoked with interrupt sources disabled and
6670  * the hardware shut down.
6671  */
6672 static void tg3_free_consistent(struct tg3 *tp)
6673 {
6674         int i;
6675
6676         for (i = 0; i < tp->irq_cnt; i++) {
6677                 struct tg3_napi *tnapi = &tp->napi[i];
6678
6679                 if (tnapi->tx_ring) {
6680                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6681                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6682                         tnapi->tx_ring = NULL;
6683                 }
6684
6685                 kfree(tnapi->tx_buffers);
6686                 tnapi->tx_buffers = NULL;
6687
6688                 if (tnapi->rx_rcb) {
6689                         dma_free_coherent(&tp->pdev->dev,
6690                                           TG3_RX_RCB_RING_BYTES(tp),
6691                                           tnapi->rx_rcb,
6692                                           tnapi->rx_rcb_mapping);
6693                         tnapi->rx_rcb = NULL;
6694                 }
6695
6696                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6697
6698                 if (tnapi->hw_status) {
6699                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6700                                           tnapi->hw_status,
6701                                           tnapi->status_mapping);
6702                         tnapi->hw_status = NULL;
6703                 }
6704         }
6705
6706         if (tp->hw_stats) {
6707                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6708                                   tp->hw_stats, tp->stats_mapping);
6709                 tp->hw_stats = NULL;
6710         }
6711 }
6712
6713 /*
6714  * Must not be invoked with interrupt sources disabled and
6715  * the hardware shut down.  Can sleep.
6716  */
6717 static int tg3_alloc_consistent(struct tg3 *tp)
6718 {
6719         int i;
6720
6721         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6722                                           sizeof(struct tg3_hw_stats),
6723                                           &tp->stats_mapping,
6724                                           GFP_KERNEL);
6725         if (!tp->hw_stats)
6726                 goto err_out;
6727
6728         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6729
6730         for (i = 0; i < tp->irq_cnt; i++) {
6731                 struct tg3_napi *tnapi = &tp->napi[i];
6732                 struct tg3_hw_status *sblk;
6733
6734                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6735                                                       TG3_HW_STATUS_SIZE,
6736                                                       &tnapi->status_mapping,
6737                                                       GFP_KERNEL);
6738                 if (!tnapi->hw_status)
6739                         goto err_out;
6740
6741                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6742                 sblk = tnapi->hw_status;
6743
6744                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6745                         goto err_out;
6746
6747                 /* If multivector TSS is enabled, vector 0 does not handle
6748                  * tx interrupts.  Don't allocate any resources for it.
6749                  */
6750                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6751                     (i && tg3_flag(tp, ENABLE_TSS))) {
6752                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6753                                                     TG3_TX_RING_SIZE,
6754                                                     GFP_KERNEL);
6755                         if (!tnapi->tx_buffers)
6756                                 goto err_out;
6757
6758                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6759                                                             TG3_TX_RING_BYTES,
6760                                                         &tnapi->tx_desc_mapping,
6761                                                             GFP_KERNEL);
6762                         if (!tnapi->tx_ring)
6763                                 goto err_out;
6764                 }
6765
6766                 /*
6767                  * When RSS is enabled, the status block format changes
6768                  * slightly.  The "rx_jumbo_consumer", "reserved",
6769                  * and "rx_mini_consumer" members get mapped to the
6770                  * other three rx return ring producer indexes.
6771                  */
6772                 switch (i) {
6773                 default:
6774                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6775                         break;
6776                 case 2:
6777                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6778                         break;
6779                 case 3:
6780                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6781                         break;
6782                 case 4:
6783                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6784                         break;
6785                 }
6786
6787                 /*
6788                  * If multivector RSS is enabled, vector 0 does not handle
6789                  * rx or tx interrupts.  Don't allocate any resources for it.
6790                  */
6791                 if (!i && tg3_flag(tp, ENABLE_RSS))
6792                         continue;
6793
6794                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6795                                                    TG3_RX_RCB_RING_BYTES(tp),
6796                                                    &tnapi->rx_rcb_mapping,
6797                                                    GFP_KERNEL);
6798                 if (!tnapi->rx_rcb)
6799                         goto err_out;
6800
6801                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6802         }
6803
6804         return 0;
6805
6806 err_out:
6807         tg3_free_consistent(tp);
6808         return -ENOMEM;
6809 }
6810
6811 #define MAX_WAIT_CNT 1000
6812
6813 /* To stop a block, clear the enable bit and poll till it
6814  * clears.  tp->lock is held.
6815  */
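/* The loop below polls every 100us for up to MAX_WAIT_CNT iterations,
 * i.e. roughly 100ms, before giving up with -ENODEV.
 */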
6816 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6817 {
6818         unsigned int i;
6819         u32 val;
6820
6821         if (tg3_flag(tp, 5705_PLUS)) {
6822                 switch (ofs) {
6823                 case RCVLSC_MODE:
6824                 case DMAC_MODE:
6825                 case MBFREE_MODE:
6826                 case BUFMGR_MODE:
6827                 case MEMARB_MODE:
6828                         /* We can't enable/disable these bits of the
6829                          * 5705/5750, just say success.
6830                          */
6831                         return 0;
6832
6833                 default:
6834                         break;
6835                 }
6836         }
6837
6838         val = tr32(ofs);
6839         val &= ~enable_bit;
6840         tw32_f(ofs, val);
6841
6842         for (i = 0; i < MAX_WAIT_CNT; i++) {
6843                 udelay(100);
6844                 val = tr32(ofs);
6845                 if ((val & enable_bit) == 0)
6846                         break;
6847         }
6848
6849         if (i == MAX_WAIT_CNT && !silent) {
6850                 dev_err(&tp->pdev->dev,
6851                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6852                         ofs, enable_bit);
6853                 return -ENODEV;
6854         }
6855
6856         return 0;
6857 }
6858
6859 /* tp->lock is held. */
6860 static int tg3_abort_hw(struct tg3 *tp, int silent)
6861 {
6862         int i, err;
6863
6864         tg3_disable_ints(tp);
6865
6866         tp->rx_mode &= ~RX_MODE_ENABLE;
6867         tw32_f(MAC_RX_MODE, tp->rx_mode);
6868         udelay(10);
6869
6870         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6871         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6872         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6873         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6874         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6875         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6876
6877         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6878         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6879         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6880         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6881         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6882         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6883         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6884
6885         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6886         tw32_f(MAC_MODE, tp->mac_mode);
6887         udelay(40);
6888
6889         tp->tx_mode &= ~TX_MODE_ENABLE;
6890         tw32_f(MAC_TX_MODE, tp->tx_mode);
6891
6892         for (i = 0; i < MAX_WAIT_CNT; i++) {
6893                 udelay(100);
6894                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6895                         break;
6896         }
6897         if (i >= MAX_WAIT_CNT) {
6898                 dev_err(&tp->pdev->dev,
6899                         "%s timed out, TX_MODE_ENABLE will not clear "
6900                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6901                 err |= -ENODEV;
6902         }
6903
6904         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6905         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6906         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6907
6908         tw32(FTQ_RESET, 0xffffffff);
6909         tw32(FTQ_RESET, 0x00000000);
6910
6911         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6912         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6913
6914         for (i = 0; i < tp->irq_cnt; i++) {
6915                 struct tg3_napi *tnapi = &tp->napi[i];
6916                 if (tnapi->hw_status)
6917                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6918         }
6919         if (tp->hw_stats)
6920                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6921
6922         return err;
6923 }
6924
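/* Post an event to the APE management firmware: after verifying the
 * shared segment signature and firmware-ready status, wait (briefly)
 * for any previous event to be consumed, latch the new event with the
 * pending bit set under the APE memory lock, and finally ring the
 * APE doorbell register.
 */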
6925 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6926 {
6927         int i;
6928         u32 apedata;
6929
6930         /* NCSI does not support APE events */
6931         if (tg3_flag(tp, APE_HAS_NCSI))
6932                 return;
6933
6934         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6935         if (apedata != APE_SEG_SIG_MAGIC)
6936                 return;
6937
6938         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6939         if (!(apedata & APE_FW_STATUS_READY))
6940                 return;
6941
6942         /* Wait for up to 1 millisecond for APE to service previous event. */
6943         for (i = 0; i < 10; i++) {
6944                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6945                         return;
6946
6947                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6948
6949                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6950                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6951                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6952
6953                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6954
6955                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6956                         break;
6957
6958                 udelay(100);
6959         }
6960
6961         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6962                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6963 }
6964
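/* Inform the APE management firmware of driver state transitions
 * (start, unload or WoL, suspend) by updating the host segment and
 * sending the matching state-change event.
 */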
6965 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6966 {
6967         u32 event;
6968         u32 apedata;
6969
6970         if (!tg3_flag(tp, ENABLE_APE))
6971                 return;
6972
6973         switch (kind) {
6974         case RESET_KIND_INIT:
6975                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6976                                 APE_HOST_SEG_SIG_MAGIC);
6977                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6978                                 APE_HOST_SEG_LEN_MAGIC);
6979                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6980                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6981                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6982                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6983                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6984                                 APE_HOST_BEHAV_NO_PHYLOCK);
6985                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6986                                     TG3_APE_HOST_DRVR_STATE_START);
6987
6988                 event = APE_EVENT_STATUS_STATE_START;
6989                 break;
6990         case RESET_KIND_SHUTDOWN:
6991                 /* With the interface we are currently using,
6992                  * APE does not track driver state.  Wiping
6993                  * out the HOST SEGMENT SIGNATURE forces
6994                  * the APE to assume OS absent status.
6995                  */
6996                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6997
6998                 if (device_may_wakeup(&tp->pdev->dev) &&
6999                     tg3_flag(tp, WOL_ENABLE)) {
7000                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7001                                             TG3_APE_HOST_WOL_SPEED_AUTO);
7002                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7003                 } else
7004                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7005
7006                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7007
7008                 event = APE_EVENT_STATUS_STATE_UNLOAD;
7009                 break;
7010         case RESET_KIND_SUSPEND:
7011                 event = APE_EVENT_STATUS_STATE_SUSPEND;
7012                 break;
7013         default:
7014                 return;
7015         }
7016
7017         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7018
7019         tg3_ape_send_event(tp, event);
7020 }
7021
7022 /* tp->lock is held. */
7023 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7024 {
7025         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7026                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7027
7028         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7029                 switch (kind) {
7030                 case RESET_KIND_INIT:
7031                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7032                                       DRV_STATE_START);
7033                         break;
7034
7035                 case RESET_KIND_SHUTDOWN:
7036                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7037                                       DRV_STATE_UNLOAD);
7038                         break;
7039
7040                 case RESET_KIND_SUSPEND:
7041                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7042                                       DRV_STATE_SUSPEND);
7043                         break;
7044
7045                 default:
7046                         break;
7047                 }
7048         }
7049
7050         if (kind == RESET_KIND_INIT ||
7051             kind == RESET_KIND_SUSPEND)
7052                 tg3_ape_driver_state_change(tp, kind);
7053 }
7054
7055 /* tp->lock is held. */
7056 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7057 {
7058         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7059                 switch (kind) {
7060                 case RESET_KIND_INIT:
7061                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7062                                       DRV_STATE_START_DONE);
7063                         break;
7064
7065                 case RESET_KIND_SHUTDOWN:
7066                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7067                                       DRV_STATE_UNLOAD_DONE);
7068                         break;
7069
7070                 default:
7071                         break;
7072                 }
7073         }
7074
7075         if (kind == RESET_KIND_SHUTDOWN)
7076                 tg3_ape_driver_state_change(tp, kind);
7077 }
7078
7079 /* tp->lock is held. */
7080 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7081 {
7082         if (tg3_flag(tp, ENABLE_ASF)) {
7083                 switch (kind) {
7084                 case RESET_KIND_INIT:
7085                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7086                                       DRV_STATE_START);
7087                         break;
7088
7089                 case RESET_KIND_SHUTDOWN:
7090                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7091                                       DRV_STATE_UNLOAD);
7092                         break;
7093
7094                 case RESET_KIND_SUSPEND:
7095                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7096                                       DRV_STATE_SUSPEND);
7097                         break;
7098
7099                 default:
7100                         break;
7101                 }
7102         }
7103 }
7104
7105 static int tg3_poll_fw(struct tg3 *tp)
7106 {
7107         int i;
7108         u32 val;
7109
7110         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7111                 /* Wait up to 20ms for init done. */
7112                 for (i = 0; i < 200; i++) {
7113                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7114                                 return 0;
7115                         udelay(100);
7116                 }
7117                 return -ENODEV;
7118         }
7119
7120         /* Wait for firmware initialization to complete. */
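         /* The bootcode acks by writing back the one's complement of the
          * magic value that tg3_write_sig_pre_reset() placed in the
          * firmware mailbox.
          */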
7121         for (i = 0; i < 100000; i++) {
7122                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7123                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7124                         break;
7125                 udelay(10);
7126         }
7127
7128         /* Chip might not be fitted with firmware.  Some Sun onboard
7129          * parts are configured like that.  So don't signal the timeout
7130          * of the above loop as an error, but do report the lack of
7131          * running firmware once.
7132          */
7133         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7134                 tg3_flag_set(tp, NO_FWARE_REPORTED);
7135
7136                 netdev_info(tp->dev, "No firmware running\n");
7137         }
7138
7139         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7140                 /* The 57765 A0 needs a little more
7141                  * time to do some important work.
7142                  */
7143                 mdelay(10);
7144         }
7145
7146         return 0;
7147 }
7148
7149 /* Save PCI command register before chip reset */
7150 static void tg3_save_pci_state(struct tg3 *tp)
7151 {
7152         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7153 }
7154
7155 /* Restore PCI state after chip reset */
7156 static void tg3_restore_pci_state(struct tg3 *tp)
7157 {
7158         u32 val;
7159
7160         /* Re-enable indirect register accesses. */
7161         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7162                                tp->misc_host_ctrl);
7163
7164         /* Set MAX PCI retry to zero. */
7165         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7166         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7167             tg3_flag(tp, PCIX_MODE))
7168                 val |= PCISTATE_RETRY_SAME_DMA;
7169         /* Allow reads and writes to the APE register and memory space. */
7170         if (tg3_flag(tp, ENABLE_APE))
7171                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7172                        PCISTATE_ALLOW_APE_SHMEM_WR |
7173                        PCISTATE_ALLOW_APE_PSPACE_WR;
7174         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7175
7176         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7177
7178         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7179                 if (tg3_flag(tp, PCI_EXPRESS))
7180                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7181                 else {
7182                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7183                                               tp->pci_cacheline_sz);
7184                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7185                                               tp->pci_lat_timer);
7186                 }
7187         }
7188
7189         /* Make sure PCI-X relaxed ordering bit is clear. */
7190         if (tg3_flag(tp, PCIX_MODE)) {
7191                 u16 pcix_cmd;
7192
7193                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7194                                      &pcix_cmd);
7195                 pcix_cmd &= ~PCI_X_CMD_ERO;
7196                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7197                                       pcix_cmd);
7198         }
7199
7200         if (tg3_flag(tp, 5780_CLASS)) {
7201
7202                 /* A chip reset on the 5780 clears the MSI enable bit,
7203                  * so we need to restore it.
7204                  */
7205                 if (tg3_flag(tp, USING_MSI)) {
7206                         u16 ctrl;
7207
7208                         pci_read_config_word(tp->pdev,
7209                                              tp->msi_cap + PCI_MSI_FLAGS,
7210                                              &ctrl);
7211                         pci_write_config_word(tp->pdev,
7212                                               tp->msi_cap + PCI_MSI_FLAGS,
7213                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7214                         val = tr32(MSGINT_MODE);
7215                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7216                 }
7217         }
7218 }
7219
7220 static void tg3_stop_fw(struct tg3 *);
7221
7222 /* tp->lock is held. */
7223 static int tg3_chip_reset(struct tg3 *tp)
7224 {
7225         u32 val;
7226         void (*write_op)(struct tg3 *, u32, u32);
7227         int i, err;
7228
7229         tg3_nvram_lock(tp);
7230
7231         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7232
7233         /* No matching tg3_nvram_unlock() after this because
7234          * chip reset below will undo the nvram lock.
7235          */
7236         tp->nvram_lock_cnt = 0;
7237
7238         /* GRC_MISC_CFG core clock reset will clear the memory
7239          * enable bit in PCI register 4 and the MSI enable bit
7240          * on some chips, so we save relevant registers here.
7241          */
7242         tg3_save_pci_state(tp);
7243
7244         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7245             tg3_flag(tp, 5755_PLUS))
7246                 tw32(GRC_FASTBOOT_PC, 0);
7247
7248         /*
7249          * We must avoid the readl() that normally takes place.
7250          * It locks machines, causes machine checks, and other
7251          * fun things.  So, temporarily disable the 5701
7252          * hardware workaround, while we do the reset.
7253          */
7254         write_op = tp->write32;
7255         if (write_op == tg3_write_flush_reg32)
7256                 tp->write32 = tg3_write32;
7257
7258         /* Prevent the irq handler from reading or writing PCI registers
7259          * during chip reset when the memory enable bit in the PCI command
7260          * register may be cleared.  The chip does not generate interrupt
7261          * at this time, but the irq handler may still be called due to irq
7262          * sharing or irqpoll.
7263          */
7264         tg3_flag_set(tp, CHIP_RESETTING);
7265         for (i = 0; i < tp->irq_cnt; i++) {
7266                 struct tg3_napi *tnapi = &tp->napi[i];
7267                 if (tnapi->hw_status) {
7268                         tnapi->hw_status->status = 0;
7269                         tnapi->hw_status->status_tag = 0;
7270                 }
7271                 tnapi->last_tag = 0;
7272                 tnapi->last_irq_tag = 0;
7273         }
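         /* Ensure the cleared status blocks are visible before the IRQ
          * handlers are synchronized below.
          */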
7274         smp_mb();
7275
7276         for (i = 0; i < tp->irq_cnt; i++)
7277                 synchronize_irq(tp->napi[i].irq_vec);
7278
7279         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7280                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7281                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7282         }
7283
7284         /* do the reset */
7285         val = GRC_MISC_CFG_CORECLK_RESET;
7286
7287         if (tg3_flag(tp, PCI_EXPRESS)) {
7288                 /* Force PCIe 1.0a mode */
7289                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7290                     !tg3_flag(tp, 57765_PLUS) &&
7291                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7292                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7293                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7294
7295                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7296                         tw32(GRC_MISC_CFG, (1 << 29));
7297                         val |= (1 << 29);
7298                 }
7299         }
7300
7301         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7302                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7303                 tw32(GRC_VCPU_EXT_CTRL,
7304                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7305         }
7306
7307         /* Manage gphy power for all CPMU-absent PCIe devices. */
7308         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7309                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7310
7311         tw32(GRC_MISC_CFG, val);
7312
7313         /* restore 5701 hardware bug workaround write method */
7314         tp->write32 = write_op;
7315
7316         /* Unfortunately, we have to delay before the PCI read back.
7317          * Some 575X chips will not even respond to a PCI cfg access
7318          * when the reset command is given to the chip.
7319          *
7320          * How do these hardware designers expect things to work
7321          * properly if the PCI write is posted for a long period
7322          * of time?  It is always necessary to have some method by
7323          * which a register read back can occur to push out the
7324          * write that does the reset.
7325          *
7326          * For most tg3 variants the trick below was working.
7327          * Ho hum...
7328          */
7329         udelay(120);
7330
7331         /* Flush PCI posted writes.  The normal MMIO registers
7332          * are inaccessible at this time so this is the only
7333          * way to do this reliably (actually, this is no longer
7334          * the case, see above).  I tried to use indirect
7335          * register read/write but this upset some 5701 variants.
7336          */
7337         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7338
7339         udelay(120);
7340
7341         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7342                 u16 val16;
7343
7344                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7345                         int i;
7346                         u32 cfg_val;
7347
7348                         /* Wait for link training to complete.  */
7349                         for (i = 0; i < 5000; i++)
7350                                 udelay(100);
7351
7352                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7353                         pci_write_config_dword(tp->pdev, 0xc4,
7354                                                cfg_val | (1 << 15));
7355                 }
7356
7357                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7358                 pci_read_config_word(tp->pdev,
7359                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7360                                      &val16);
7361                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7362                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7363                 /*
7364                  * Older PCIe devices only support the 128 byte
7365                  * MPS setting.  Enforce the restriction.
7366                  */
7367                 if (!tg3_flag(tp, CPMU_PRESENT))
7368                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7369                 pci_write_config_word(tp->pdev,
7370                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7371                                       val16);
7372
7373                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7374
7375                 /* Clear error status */
7376                 pci_write_config_word(tp->pdev,
7377                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7378                                       PCI_EXP_DEVSTA_CED |
7379                                       PCI_EXP_DEVSTA_NFED |
7380                                       PCI_EXP_DEVSTA_FED |
7381                                       PCI_EXP_DEVSTA_URD);
7382         }
7383
7384         tg3_restore_pci_state(tp);
7385
7386         tg3_flag_clear(tp, CHIP_RESETTING);
7387         tg3_flag_clear(tp, ERROR_PROCESSED);
7388
7389         val = 0;
7390         if (tg3_flag(tp, 5780_CLASS))
7391                 val = tr32(MEMARB_MODE);
7392         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7393
7394         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7395                 tg3_stop_fw(tp);
7396                 tw32(0x5000, 0x400);
7397         }
7398
7399         tw32(GRC_MODE, tp->grc_mode);
7400
7401         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7402                 val = tr32(0xc4);
7403
7404                 tw32(0xc4, val | (1 << 15));
7405         }
7406
7407         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7408             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7409                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7410                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7411                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7412                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7413         }
7414
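         /* Rebuild a minimal MAC_MODE: keep the APE pass-through path
          * enabled and select the port mode matching the PHY type.
          */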
7415         if (tg3_flag(tp, ENABLE_APE))
7416                 tp->mac_mode = MAC_MODE_APE_TX_EN |
7417                                MAC_MODE_APE_RX_EN |
7418                                MAC_MODE_TDE_ENABLE;
7419
7420         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7421                 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7422                 val = tp->mac_mode;
7423         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7424                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7425                 val = tp->mac_mode;
7426         } else
7427                 val = 0;
7428
7429         tw32_f(MAC_MODE, val);
7430         udelay(40);
7431
7432         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7433
7434         err = tg3_poll_fw(tp);
7435         if (err)
7436                 return err;
7437
7438         tg3_mdio_start(tp);
7439
7440         if (tg3_flag(tp, PCI_EXPRESS) &&
7441             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7442             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7443             !tg3_flag(tp, 57765_PLUS)) {
7444                 val = tr32(0x7c00);
7445
7446                 tw32(0x7c00, val | (1 << 25));
7447         }
7448
7449         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7450                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7451                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7452         }
7453
7454         /* Reprobe ASF enable state.  */
7455         tg3_flag_clear(tp, ENABLE_ASF);
7456         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7457         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7458         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7459                 u32 nic_cfg;
7460
7461                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7462                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7463                         tg3_flag_set(tp, ENABLE_ASF);
7464                         tp->last_event_jiffies = jiffies;
7465                         if (tg3_flag(tp, 5750_PLUS))
7466                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7467                 }
7468         }
7469
7470         return 0;
7471 }
7472
7473 /* tp->lock is held. */
7474 static void tg3_stop_fw(struct tg3 *tp)
7475 {
7476         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7477                 /* Wait for RX cpu to ACK the previous event. */
7478                 tg3_wait_for_event_ack(tp);
7479
7480                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7481
7482                 tg3_generate_fw_event(tp);
7483
7484                 /* Wait for RX cpu to ACK this event. */
7485                 tg3_wait_for_event_ack(tp);
7486         }
7487 }
7488
7489 /* tp->lock is held. */
7490 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7491 {
7492         int err;
7493
7494         tg3_stop_fw(tp);
7495
7496         tg3_write_sig_pre_reset(tp, kind);
7497
7498         tg3_abort_hw(tp, silent);
7499         err = tg3_chip_reset(tp);
7500
7501         __tg3_set_mac_addr(tp, 0);
7502
7503         tg3_write_sig_legacy(tp, kind);
7504         tg3_write_sig_post_reset(tp, kind);
7505
7506         if (err)
7507                 return err;
7508
7509         return 0;
7510 }
7511
7512 #define RX_CPU_SCRATCH_BASE     0x30000
7513 #define RX_CPU_SCRATCH_SIZE     0x04000
7514 #define TX_CPU_SCRATCH_BASE     0x34000
7515 #define TX_CPU_SCRATCH_SIZE     0x04000
7516
7517 /* tp->lock is held. */
7518 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7519 {
7520         int i;
7521
7522         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7523
7524         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7525                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7526
7527                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7528                 return 0;
7529         }
7530         if (offset == RX_CPU_BASE) {
7531                 for (i = 0; i < 10000; i++) {
7532                         tw32(offset + CPU_STATE, 0xffffffff);
7533                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7534                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7535                                 break;
7536                 }
7537
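                 /* Issue one final halt and give the RX CPU time to settle. */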
7538                 tw32(offset + CPU_STATE, 0xffffffff);
7539                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7540                 udelay(10);
7541         } else {
7542                 for (i = 0; i < 10000; i++) {
7543                         tw32(offset + CPU_STATE, 0xffffffff);
7544                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7545                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7546                                 break;
7547                 }
7548         }
7549
7550         if (i >= 10000) {
7551                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7552                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7553                 return -ENODEV;
7554         }
7555
7556         /* Clear firmware's nvram arbitration. */
7557         if (tg3_flag(tp, NVRAM))
7558                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7559         return 0;
7560 }
7561
7562 struct fw_info {
7563         unsigned int fw_base;
7564         unsigned int fw_len;
7565         const __be32 *fw_data;
7566 };
7567
7568 /* tp->lock is held. */
7569 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7570                                  int cpu_scratch_size, struct fw_info *info)
7571 {
7572         int err, lock_err, i;
7573         void (*write_op)(struct tg3 *, u32, u32);
7574
7575         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7576                 netdev_err(tp->dev,
7577                            "%s: attempt to load TX cpu firmware on a 5705 or later chip\n",
7578                            __func__);
7579                 return -EINVAL;
7580         }
7581
7582         if (tg3_flag(tp, 5705_PLUS))
7583                 write_op = tg3_write_mem;
7584         else
7585                 write_op = tg3_write_indirect_reg32;
7586
7587         /* It is possible that bootcode is still loading at this point.
7588          * Get the nvram lock first before halting the cpu.
7589          */
7590         lock_err = tg3_nvram_lock(tp);
7591         err = tg3_halt_cpu(tp, cpu_base);
7592         if (!lock_err)
7593                 tg3_nvram_unlock(tp);
7594         if (err)
7595                 goto out;
7596
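         /* Clear the CPU scratchpad before copying in the new image. */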
7597         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7598                 write_op(tp, cpu_scratch_base + i, 0);
7599         tw32(cpu_base + CPU_STATE, 0xffffffff);
7600         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
7601         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7602                 write_op(tp, (cpu_scratch_base +
7603                               (info->fw_base & 0xffff) +
7604                               (i * sizeof(u32))),
7605                               be32_to_cpu(info->fw_data[i]));
7606
7607         err = 0;
7608
7609 out:
7610         return err;
7611 }
7612
7613 /* tp->lock is held. */
7614 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7615 {
7616         struct fw_info info;
7617         const __be32 *fw_data;
7618         int err, i;
7619
7620         fw_data = (void *)tp->fw->data;
7621
7622         /* The firmware blob starts with version numbers, followed by
7623            the start address and length.  We set the complete length:
7624            length = end_address_of_bss - start_address_of_text.
7625            The remainder is the blob, loaded contiguously
7626            from the start address. */
7627
7628         info.fw_base = be32_to_cpu(fw_data[1]);
7629         info.fw_len = tp->fw->size - 12;
7630         info.fw_data = &fw_data[3];
7631
7632         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7633                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7634                                     &info);
7635         if (err)
7636                 return err;
7637
7638         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7639                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7640                                     &info);
7641         if (err)
7642                 return err;
7643
7644         /* Now startup only the RX cpu. */
7645         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7646         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7647
7648         for (i = 0; i < 5; i++) {
7649                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7650                         break;
7651                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7652                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7653                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7654                 udelay(1000);
7655         }
7656         if (i >= 5) {
7657                 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x, "
7658                            "should be %08x\n", __func__,
7659                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7660                 return -ENODEV;
7661         }
7662         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7663         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7664
7665         return 0;
7666 }
7667
7668 /* tp->lock is held. */
7669 static int tg3_load_tso_firmware(struct tg3 *tp)
7670 {
7671         struct fw_info info;
7672         const __be32 *fw_data;
7673         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7674         int err, i;
7675
7676         if (tg3_flag(tp, HW_TSO_1) ||
7677             tg3_flag(tp, HW_TSO_2) ||
7678             tg3_flag(tp, HW_TSO_3))
7679                 return 0;
7680
7681         fw_data = (void *)tp->fw->data;
7682
7683         /* The firmware blob starts with version numbers, followed by
7684            the start address and length.  We set the complete length:
7685            length = end_address_of_bss - start_address_of_text.
7686            The remainder is the blob, loaded contiguously
7687            from the start address. */
7688
7689         info.fw_base = be32_to_cpu(fw_data[1]);
7690         cpu_scratch_size = tp->fw_len;
7691         info.fw_len = tp->fw->size - 12;
7692         info.fw_data = &fw_data[3];
7693
7694         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7695                 cpu_base = RX_CPU_BASE;
7696                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7697         } else {
7698                 cpu_base = TX_CPU_BASE;
7699                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7700                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7701         }
7702
7703         err = tg3_load_firmware_cpu(tp, cpu_base,
7704                                     cpu_scratch_base, cpu_scratch_size,
7705                                     &info);
7706         if (err)
7707                 return err;
7708
7709         /* Now startup the cpu. */
7710         tw32(cpu_base + CPU_STATE, 0xffffffff);
7711         tw32_f(cpu_base + CPU_PC, info.fw_base);
7712
7713         for (i = 0; i < 5; i++) {
7714                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7715                         break;
7716                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7717                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7718                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7719                 udelay(1000);
7720         }
7721         if (i >= 5) {
7722                 netdev_err(tp->dev,
7723                            "%s failed to set CPU PC, is %08x, should be %08x\n",
7724                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7725                 return -ENODEV;
7726         }
7727         tw32(cpu_base + CPU_STATE, 0xffffffff);
7728         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7729         return 0;
7730 }
7731
7732
7733 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7734 {
7735         struct tg3 *tp = netdev_priv(dev);
7736         struct sockaddr *addr = p;
7737         int err = 0, skip_mac_1 = 0;
7738
7739         if (!is_valid_ether_addr(addr->sa_data))
7740                 return -EINVAL;
7741
7742         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7743
7744         if (!netif_running(dev))
7745                 return 0;
7746
7747         if (tg3_flag(tp, ENABLE_ASF)) {
7748                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7749
7750                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7751                 addr0_low = tr32(MAC_ADDR_0_LOW);
7752                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7753                 addr1_low = tr32(MAC_ADDR_1_LOW);
7754
7755                 /* Skip MAC addr 1 if ASF is using it. */
7756                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7757                     !(addr1_high == 0 && addr1_low == 0))
7758                         skip_mac_1 = 1;
7759         }
7760         spin_lock_bh(&tp->lock);
7761         __tg3_set_mac_addr(tp, skip_mac_1);
7762         spin_unlock_bh(&tp->lock);
7763
7764         return err;
7765 }
7766
7767 /* tp->lock is held. */
7768 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7769                            dma_addr_t mapping, u32 maxlen_flags,
7770                            u32 nic_addr)
7771 {
7772         tg3_write_mem(tp,
7773                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7774                       ((u64) mapping >> 32));
7775         tg3_write_mem(tp,
7776                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7777                       ((u64) mapping & 0xffffffff));
7778         tg3_write_mem(tp,
7779                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7780                        maxlen_flags);
7781
7782         if (!tg3_flag(tp, 5705_PLUS))
7783                 tg3_write_mem(tp,
7784                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7785                               nic_addr);
7786 }
7787
7788 static void __tg3_set_rx_mode(struct net_device *);
7789 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7790 {
7791         int i;
7792
7793         if (!tg3_flag(tp, ENABLE_TSS)) {
7794                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7795                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7796                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7797         } else {
7798                 tw32(HOSTCC_TXCOL_TICKS, 0);
7799                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7800                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7801         }
7802
7803         if (!tg3_flag(tp, ENABLE_RSS)) {
7804                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7805                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7806                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7807         } else {
7808                 tw32(HOSTCC_RXCOL_TICKS, 0);
7809                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7810                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7811         }
7812
7813         if (!tg3_flag(tp, 5705_PLUS)) {
7814                 u32 val = ec->stats_block_coalesce_usecs;
7815
7816                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7817                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7818
7819                 if (!netif_carrier_ok(tp->dev))
7820                         val = 0;
7821
7822                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7823         }
7824
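         /* Registers for the extra MSI-X vectors sit at a stride of 0x18. */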
7825         for (i = 0; i < tp->irq_cnt - 1; i++) {
7826                 u32 reg;
7827
7828                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7829                 tw32(reg, ec->rx_coalesce_usecs);
7830                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7831                 tw32(reg, ec->rx_max_coalesced_frames);
7832                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7833                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7834
7835                 if (tg3_flag(tp, ENABLE_TSS)) {
7836                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7837                         tw32(reg, ec->tx_coalesce_usecs);
7838                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7839                         tw32(reg, ec->tx_max_coalesced_frames);
7840                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7841                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7842                 }
7843         }
7844
7845         for (; i < tp->irq_max - 1; i++) {
7846                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7847                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7848                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7849
7850                 if (tg3_flag(tp, ENABLE_TSS)) {
7851                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7852                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7853                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7854                 }
7855         }
7856 }
7857
7858 /* tp->lock is held. */
7859 static void tg3_rings_reset(struct tg3 *tp)
7860 {
7861         int i;
7862         u32 stblk, txrcb, rxrcb, limit;
7863         struct tg3_napi *tnapi = &tp->napi[0];
7864
7865         /* Disable all transmit rings but the first. */
7866         if (!tg3_flag(tp, 5705_PLUS))
7867                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7868         else if (tg3_flag(tp, 5717_PLUS))
7869                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7870         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7871                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7872         else
7873                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7874
7875         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7876              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7877                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7878                               BDINFO_FLAGS_DISABLED);
7879
7880
7881         /* Disable all receive return rings but the first. */
7882         if (tg3_flag(tp, 5717_PLUS))
7883                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7884         else if (!tg3_flag(tp, 5705_PLUS))
7885                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7886         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7887                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7888                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7889         else
7890                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7891
7892         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7893              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7894                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7895                               BDINFO_FLAGS_DISABLED);
7896
7897         /* Disable interrupts */
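         /* A non-zero write to an interrupt mailbox masks that vector. */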
7898         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7899         tp->napi[0].chk_msi_cnt = 0;
7900         tp->napi[0].last_rx_cons = 0;
7901         tp->napi[0].last_tx_cons = 0;
7902
7903         /* Zero mailbox registers. */
7904         if (tg3_flag(tp, SUPPORT_MSIX)) {
7905                 for (i = 1; i < tp->irq_max; i++) {
7906                         tp->napi[i].tx_prod = 0;
7907                         tp->napi[i].tx_cons = 0;
7908                         if (tg3_flag(tp, ENABLE_TSS))
7909                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7910                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7911                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7912                         tp->napi[i].chk_msi_cnt = 0;
7913                         tp->napi[i].last_rx_cons = 0;
7914                         tp->napi[i].last_tx_cons = 0;
7915                 }
7916                 if (!tg3_flag(tp, ENABLE_TSS))
7917                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7918         } else {
7919                 tp->napi[0].tx_prod = 0;
7920                 tp->napi[0].tx_cons = 0;
7921                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7922                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7923         }
7924
7925         /* Make sure the NIC-based send BD rings are disabled. */
7926         if (!tg3_flag(tp, 5705_PLUS)) {
7927                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7928                 for (i = 0; i < 16; i++)
7929                         tw32_tx_mbox(mbox + i * 8, 0);
7930         }
7931
7932         txrcb = NIC_SRAM_SEND_RCB;
7933         rxrcb = NIC_SRAM_RCV_RET_RCB;
7934
7935         /* Clear status block in ram. */
7936         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7937
7938         /* Set status block DMA address */
7939         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7940              ((u64) tnapi->status_mapping >> 32));
7941         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7942              ((u64) tnapi->status_mapping & 0xffffffff));
7943
7944         if (tnapi->tx_ring) {
7945                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7946                                (TG3_TX_RING_SIZE <<
7947                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7948                                NIC_SRAM_TX_BUFFER_DESC);
7949                 txrcb += TG3_BDINFO_SIZE;
7950         }
7951
7952         if (tnapi->rx_rcb) {
7953                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7954                                (tp->rx_ret_ring_mask + 1) <<
7955                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7956                 rxrcb += TG3_BDINFO_SIZE;
7957         }
7958
7959         stblk = HOSTCC_STATBLCK_RING1;
7960
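         /* Each vector's status block address is an 8-byte hi/lo register
          * pair.
          */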
7961         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7962                 u64 mapping = (u64)tnapi->status_mapping;
7963                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7964                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7965
7966                 /* Clear status block in ram. */
7967                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7968
7969                 if (tnapi->tx_ring) {
7970                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7971                                        (TG3_TX_RING_SIZE <<
7972                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7973                                        NIC_SRAM_TX_BUFFER_DESC);
7974                         txrcb += TG3_BDINFO_SIZE;
7975                 }
7976
7977                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7978                                ((tp->rx_ret_ring_mask + 1) <<
7979                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7980
7981                 stblk += 8;
7982                 rxrcb += TG3_BDINFO_SIZE;
7983         }
7984 }
7985
7986 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7987 {
7988         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7989
7990         if (!tg3_flag(tp, 5750_PLUS) ||
7991             tg3_flag(tp, 5780_CLASS) ||
7992             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7993             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7994                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7995         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7996                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7997                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7998         else
7999                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8000
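         /* Replenish threshold: the smaller of half the NIC BD cache
          * (capped at rx_std_max_post) and 1/8 of the host ring.
          */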
8001         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8002         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8003
8004         val = min(nic_rep_thresh, host_rep_thresh);
8005         tw32(RCVBDI_STD_THRESH, val);
8006
8007         if (tg3_flag(tp, 57765_PLUS))
8008                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8009
8010         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8011                 return;
8012
8013         if (!tg3_flag(tp, 5705_PLUS))
8014                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8015         else
8016                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8017
8018         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8019
8020         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8021         tw32(RCVBDI_JUMBO_THRESH, val);
8022
8023         if (tg3_flag(tp, 57765_PLUS))
8024                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8025 }
8026
8027 /* tp->lock is held. */
8028 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8029 {
8030         u32 val, rdmac_mode;
8031         int i, err, limit;
8032         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8033
8034         tg3_disable_ints(tp);
8035
8036         tg3_stop_fw(tp);
8037
8038         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8039
8040         if (tg3_flag(tp, INIT_COMPLETE))
8041                 tg3_abort_hw(tp, 1);
8042
8043         /* Enable MAC control of LPI */
8044         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8045                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8046                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8047                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8048
8049                 tw32_f(TG3_CPMU_EEE_CTRL,
8050                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8051
8052                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8053                       TG3_CPMU_EEEMD_LPI_IN_TX |
8054                       TG3_CPMU_EEEMD_LPI_IN_RX |
8055                       TG3_CPMU_EEEMD_EEE_ENABLE;
8056
8057                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8058                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8059
8060                 if (tg3_flag(tp, ENABLE_APE))
8061                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8062
8063                 tw32_f(TG3_CPMU_EEE_MODE, val);
8064
8065                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8066                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8067                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8068
8069                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8070                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8071                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8072         }
8073
8074         if (reset_phy)
8075                 tg3_phy_reset(tp);
8076
8077         err = tg3_chip_reset(tp);
8078         if (err)
8079                 return err;
8080
8081         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8082
8083         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8084                 val = tr32(TG3_CPMU_CTRL);
8085                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8086                 tw32(TG3_CPMU_CTRL, val);
8087
8088                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8089                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8090                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8091                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8092
8093                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8094                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8095                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8096                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8097
8098                 val = tr32(TG3_CPMU_HST_ACC);
8099                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8100                 val |= CPMU_HST_ACC_MACCLK_6_25;
8101                 tw32(TG3_CPMU_HST_ACC, val);
8102         }
8103
8104         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8105                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8106                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8107                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8108                 tw32(PCIE_PWR_MGMT_THRESH, val);
8109
8110                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8111                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8112
8113                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8114
8115                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8116                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8117         }
8118
8119         if (tg3_flag(tp, L1PLLPD_EN)) {
8120                 u32 grc_mode = tr32(GRC_MODE);
8121
8122                 /* Access the lower 1K of PL PCIE block registers. */
8123                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8124                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8125
8126                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8127                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8128                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8129
8130                 tw32(GRC_MODE, grc_mode);
8131         }
8132
8133         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8134                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8135                         u32 grc_mode = tr32(GRC_MODE);
8136
8137                         /* Access the lower 1K of PL PCIE block registers. */
8138                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8139                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8140
8141                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8142                                    TG3_PCIE_PL_LO_PHYCTL5);
8143                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8144                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8145
8146                         tw32(GRC_MODE, grc_mode);
8147                 }
8148
8149                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8150                         u32 grc_mode = tr32(GRC_MODE);
8151
8152                         /* Access the lower 1K of DL PCIE block registers. */
8153                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8154                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8155
8156                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8157                                    TG3_PCIE_DL_LO_FTSMAX);
8158                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8159                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8160                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8161
8162                         tw32(GRC_MODE, grc_mode);
8163                 }
8164
8165                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8166                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8167                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8168                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8169         }
8170
8171         /* This works around an issue with Athlon chipsets on
8172          * B3 tigon3 silicon.  This bit has no effect on any
8173          * other revision.  But do not set this on PCI Express
8174          * chips and don't even touch the clocks if the CPMU is present.
8175          */
8176         if (!tg3_flag(tp, CPMU_PRESENT)) {
8177                 if (!tg3_flag(tp, PCI_EXPRESS))
8178                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8179                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8180         }
8181
8182         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8183             tg3_flag(tp, PCIX_MODE)) {
8184                 val = tr32(TG3PCI_PCISTATE);
8185                 val |= PCISTATE_RETRY_SAME_DMA;
8186                 tw32(TG3PCI_PCISTATE, val);
8187         }
8188
8189         if (tg3_flag(tp, ENABLE_APE)) {
8190                 /* Allow reads and writes to the
8191                  * APE register and memory space.
8192                  */
8193                 val = tr32(TG3PCI_PCISTATE);
8194                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8195                        PCISTATE_ALLOW_APE_SHMEM_WR |
8196                        PCISTATE_ALLOW_APE_PSPACE_WR;
8197                 tw32(TG3PCI_PCISTATE, val);
8198         }
8199
8200         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8201                 /* Enable some hw fixes.  */
8202                 val = tr32(TG3PCI_MSI_DATA);
8203                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8204                 tw32(TG3PCI_MSI_DATA, val);
8205         }
8206
8207         /* Descriptor ring init may make accesses to the
8208          * NIC SRAM area to setup the TX descriptors, so we
8209          * can only do this after the hardware has been
8210          * successfully reset.
8211          */
8212         err = tg3_init_rings(tp);
8213         if (err)
8214                 return err;
8215
8216         if (tg3_flag(tp, 57765_PLUS)) {
8217                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8218                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8219                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8220                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8221                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8222                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8223                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8224                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8225         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8226                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8227                 /* This value is determined during the probe-time DMA
8228                  * engine test, tg3_test_dma.
8229                  */
8230                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8231         }
8232
8233         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8234                           GRC_MODE_4X_NIC_SEND_RINGS |
8235                           GRC_MODE_NO_TX_PHDR_CSUM |
8236                           GRC_MODE_NO_RX_PHDR_CSUM);
8237         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8238
8239         /* Pseudo-header checksum is done by hardware logic and not
8240          * the offload processors, so make the chip do the pseudo-
8241          * header checksums on receive.  For transmit it is more
8242          * convenient to do the pseudo-header checksum in software
8243          * as Linux does that on transmit for us in all cases.
8244          */
8245         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8246
8247         tw32(GRC_MODE,
8248              tp->grc_mode |
8249              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8250
8251         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8252         val = tr32(GRC_MISC_CFG);
8253         val &= ~0xff;
8254         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8255         tw32(GRC_MISC_CFG, val);
8256
8257         /* Initialize MBUF/DESC pool. */
8258         if (tg3_flag(tp, 5750_PLUS)) {
8259                 /* Do nothing.  */
8260         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8261                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8262                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8263                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8264                 else
8265                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8266                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8267                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8268         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8269                 int fw_len;
8270
8271                 fw_len = tp->fw_len;
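                 /* Round the firmware length up to a 128-byte boundary. */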
8272                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8273                 tw32(BUFMGR_MB_POOL_ADDR,
8274                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8275                 tw32(BUFMGR_MB_POOL_SIZE,
8276                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8277         }
8278
8279         if (tp->dev->mtu <= ETH_DATA_LEN) {
8280                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8281                      tp->bufmgr_config.mbuf_read_dma_low_water);
8282                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8283                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8284                 tw32(BUFMGR_MB_HIGH_WATER,
8285                      tp->bufmgr_config.mbuf_high_water);
8286         } else {
8287                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8288                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8289                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8290                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8291                 tw32(BUFMGR_MB_HIGH_WATER,
8292                      tp->bufmgr_config.mbuf_high_water_jumbo);
8293         }
8294         tw32(BUFMGR_DMA_LOW_WATER,
8295              tp->bufmgr_config.dma_low_water);
8296         tw32(BUFMGR_DMA_HIGH_WATER,
8297              tp->bufmgr_config.dma_high_water);
8298
8299         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8300         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8301                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8303             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8304             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8305                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8306         tw32(BUFMGR_MODE, val);
8307         for (i = 0; i < 2000; i++) {
8308                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8309                         break;
8310                 udelay(10);
8311         }
8312         if (i >= 2000) {
8313                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8314                 return -ENODEV;
8315         }
8316
8317         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8318                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8319
8320         tg3_setup_rxbd_thresholds(tp);
8321
8322         /* Initialize TG3_BDINFO's at:
8323          *  RCVDBDI_STD_BD:     standard eth size rx ring
8324          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8325          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8326          *
8327          * like so:
8328          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8329          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8330          *                              ring attribute flags
8331          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8332          *
8333          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8334          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8335          *
8336          * The size of each ring is fixed in the firmware, but the location is
8337          * configurable.
8338          */
8339         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8340              ((u64) tpr->rx_std_mapping >> 32));
8341         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8342              ((u64) tpr->rx_std_mapping & 0xffffffff));
8343         if (!tg3_flag(tp, 5717_PLUS))
8344                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8345                      NIC_SRAM_RX_BUFFER_DESC);
8346
8347         /* Disable the mini ring */
8348         if (!tg3_flag(tp, 5705_PLUS))
8349                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8350                      BDINFO_FLAGS_DISABLED);
8351
8352         /* Program the jumbo buffer descriptor ring control
8353          * blocks on those devices that have them.
8354          */
8355         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8356             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8357
8358                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8359                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8360                              ((u64) tpr->rx_jmb_mapping >> 32));
8361                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8362                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8363                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8364                               BDINFO_FLAGS_MAXLEN_SHIFT;
8365                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8366                              val | BDINFO_FLAGS_USE_EXT_RECV);
8367                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8368                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8369                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8370                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8371                 } else {
8372                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8373                              BDINFO_FLAGS_DISABLED);
8374                 }
8375
8376                 if (tg3_flag(tp, 57765_PLUS)) {
8377                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8378                                 val = TG3_RX_STD_MAX_SIZE_5700;
8379                         else
8380                                 val = TG3_RX_STD_MAX_SIZE_5717;
8381                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8382                         val |= (TG3_RX_STD_DMA_SZ << 2);
8383                 } else
8384                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8385         } else
8386                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8387
8388         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8389
8390         tpr->rx_std_prod_idx = tp->rx_pending;
8391         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8392
8393         tpr->rx_jmb_prod_idx =
8394                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8395         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8396
8397         tg3_rings_reset(tp);
8398
8399         /* Initialize MAC address and backoff seed. */
8400         __tg3_set_mac_addr(tp, 0);
8401
8402         /* MTU + ethernet header + FCS + optional VLAN tag */
8403         tw32(MAC_RX_MTU_SIZE,
8404              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8405
8406         /* The slot time is changed by tg3_setup_phy if we
8407          * run at gigabit with half duplex.
8408          */
8409         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8410               (6 << TX_LENGTHS_IPG_SHIFT) |
8411               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8412
8413         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8414                 val |= tr32(MAC_TX_LENGTHS) &
8415                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8416                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8417
8418         tw32(MAC_TX_LENGTHS, val);
8419
8420         /* Receive rules. */
8421         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8422         tw32(RCVLPC_CONFIG, 0x0181);
8423
8424         /* Calculate the RDMAC_MODE setting early; we need it to determine
8425          * the RCVLPC_STATE_ENABLE mask.
8426          */
8427         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8428                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8429                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8430                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8431                       RDMAC_MODE_LNGREAD_ENAB);
8432
8433         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8434                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8435
8436         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8437             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8438             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8439                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8440                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8441                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8442
8443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8444             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8445                 if (tg3_flag(tp, TSO_CAPABLE) &&
8446                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8447                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8448                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8449                            !tg3_flag(tp, IS_5788)) {
8450                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8451                 }
8452         }
8453
8454         if (tg3_flag(tp, PCI_EXPRESS))
8455                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8456
8457         if (tg3_flag(tp, HW_TSO_1) ||
8458             tg3_flag(tp, HW_TSO_2) ||
8459             tg3_flag(tp, HW_TSO_3))
8460                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8461
8462         if (tg3_flag(tp, 57765_PLUS) ||
8463             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8464             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8465                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8466
8467         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8468                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8469
8470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8471             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8472             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8474             tg3_flag(tp, 57765_PLUS)) {
8475                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8476                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8477                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8478                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8479                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8480                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8481                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8482                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8483                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8484                 }
8485                 tw32(TG3_RDMA_RSRVCTRL_REG,
8486                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8487         }
8488
8489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8490             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8491                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8492                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8493                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8494                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8495         }
8496
8497         /* Receive/send statistics. */
8498         if (tg3_flag(tp, 5750_PLUS)) {
8499                 val = tr32(RCVLPC_STATS_ENABLE);
8500                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8501                 tw32(RCVLPC_STATS_ENABLE, val);
8502         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8503                    tg3_flag(tp, TSO_CAPABLE)) {
8504                 val = tr32(RCVLPC_STATS_ENABLE);
8505                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8506                 tw32(RCVLPC_STATS_ENABLE, val);
8507         } else {
8508                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8509         }
8510         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8511         tw32(SNDDATAI_STATSENAB, 0xffffff);
8512         tw32(SNDDATAI_STATSCTRL,
8513              (SNDDATAI_SCTRL_ENABLE |
8514               SNDDATAI_SCTRL_FASTUPD));
8515
8516         /* Setup host coalescing engine. */
8517         tw32(HOSTCC_MODE, 0);
8518         for (i = 0; i < 2000; i++) {
8519                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8520                         break;
8521                 udelay(10);
8522         }
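        /* The loop above allows up to ~20 ms (2000 iterations of 10 us)
         * for the coalescing engine to report itself disabled before it
         * is reprogrammed and re-enabled further down.
         */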
8523
8524         __tg3_set_coalesce(tp, &tp->coal);
8525
8526         if (!tg3_flag(tp, 5705_PLUS)) {
8527                 /* Status/statistics block address.  See tg3_timer,
8528                  * the tg3_periodic_fetch_stats call there, and
8529                  * tg3_get_stats to see how this works for 5705/5750 chips.
8530                  */
8531                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8532                      ((u64) tp->stats_mapping >> 32));
8533                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8534                      ((u64) tp->stats_mapping & 0xffffffff));
8535                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8536
8537                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8538
8539                 /* Clear statistics and status block memory areas */
8540                 for (i = NIC_SRAM_STATS_BLK;
8541                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8542                      i += sizeof(u32)) {
8543                         tg3_write_mem(tp, i, 0);
8544                         udelay(40);
8545                 }
8546         }
8547
8548         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8549
8550         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8551         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8552         if (!tg3_flag(tp, 5705_PLUS))
8553                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8554
8555         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8556                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8557                 /* Reset to prevent intermittently losing the 1st rx packet. */
8558                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8559                 udelay(10);
8560         }
8561
8562         if (tg3_flag(tp, ENABLE_APE))
8563                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8564         else
8565                 tp->mac_mode = 0;
8566         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8567                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8568         if (!tg3_flag(tp, 5705_PLUS) &&
8569             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8570             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8571                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8572         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8573         udelay(40);
8574
8575         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8576          * If TG3_FLAG_IS_NIC is zero, we should read the
8577          * register to preserve the GPIO settings for LOMs. The GPIOs,
8578          * whether used as inputs or outputs, are set by boot code after
8579          * reset.
8580          */
8581         if (!tg3_flag(tp, IS_NIC)) {
8582                 u32 gpio_mask;
8583
8584                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8585                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8586                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8587
8588                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8589                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8590                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8591
8592                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8593                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8594
8595                 tp->grc_local_ctrl &= ~gpio_mask;
8596                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8597
8598                 /* GPIO1 must be driven high for eeprom write protect */
8599                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8600                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8601                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8602         }
8603         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8604         udelay(100);
8605
8606         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8607                 val = tr32(MSGINT_MODE);
8608                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8609                 tw32(MSGINT_MODE, val);
8610         }
8611
8612         if (!tg3_flag(tp, 5705_PLUS)) {
8613                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8614                 udelay(40);
8615         }
8616
8617         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8618                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8619                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8620                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8621                WDMAC_MODE_LNGREAD_ENAB);
8622
8623         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8624             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8625                 if (tg3_flag(tp, TSO_CAPABLE) &&
8626                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8627                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8628                         /* nothing */
8629                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8630                            !tg3_flag(tp, IS_5788)) {
8631                         val |= WDMAC_MODE_RX_ACCEL;
8632                 }
8633         }
8634
8635         /* Enable host coalescing bug fix */
8636         if (tg3_flag(tp, 5755_PLUS))
8637                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8638
8639         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8640                 val |= WDMAC_MODE_BURST_ALL_DATA;
8641
8642         tw32_f(WDMAC_MODE, val);
8643         udelay(40);
8644
8645         if (tg3_flag(tp, PCIX_MODE)) {
8646                 u16 pcix_cmd;
8647
8648                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8649                                      &pcix_cmd);
8650                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8651                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8652                         pcix_cmd |= PCI_X_CMD_READ_2K;
8653                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8654                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8655                         pcix_cmd |= PCI_X_CMD_READ_2K;
8656                 }
8657                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8658                                       pcix_cmd);
8659         }
8660
8661         tw32_f(RDMAC_MODE, rdmac_mode);
8662         udelay(40);
8663
8664         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8665         if (!tg3_flag(tp, 5705_PLUS))
8666                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8667
8668         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8669                 tw32(SNDDATAC_MODE,
8670                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8671         else
8672                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8673
8674         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8675         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8676         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8677         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8678                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8679         tw32(RCVDBDI_MODE, val);
8680         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8681         if (tg3_flag(tp, HW_TSO_1) ||
8682             tg3_flag(tp, HW_TSO_2) ||
8683             tg3_flag(tp, HW_TSO_3))
8684                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8685         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8686         if (tg3_flag(tp, ENABLE_TSS))
8687                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8688         tw32(SNDBDI_MODE, val);
8689         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8690
8691         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8692                 err = tg3_load_5701_a0_firmware_fix(tp);
8693                 if (err)
8694                         return err;
8695         }
8696
8697         if (tg3_flag(tp, TSO_CAPABLE)) {
8698                 err = tg3_load_tso_firmware(tp);
8699                 if (err)
8700                         return err;
8701         }
8702
8703         tp->tx_mode = TX_MODE_ENABLE;
8704
8705         if (tg3_flag(tp, 5755_PLUS) ||
8706             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8707                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8708
8709         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8710                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8711                 tp->tx_mode &= ~val;
8712                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8713         }
8714
8715         tw32_f(MAC_TX_MODE, tp->tx_mode);
8716         udelay(100);
8717
8718         if (tg3_flag(tp, ENABLE_RSS)) {
8719                 u32 reg = MAC_RSS_INDIR_TBL_0;
8720                 u8 *ent = (u8 *)&val;
8721
8722                 /* Setup the indirection table */
8723                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8724                         int idx = i % sizeof(val);
8725
8726                         ent[idx] = i % (tp->irq_cnt - 1);
8727                         if (idx == sizeof(val) - 1) {
8728                                 tw32(reg, val);
8729                                 reg += 4;
8730                         }
8731                 }
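                /* The loop above packs four one-byte table entries into
                 * each 32-bit indirection register before writing it out;
                 * e.g. with tp->irq_cnt == 5 the entries cycle through rx
                 * queues 0,1,2,3 across the whole table.
                 */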
8732
8733                 /* Setup the "secret" hash key. */
8734                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8735                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8736                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8737                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8738                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8739                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8740                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8741                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8742                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8743                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8744         }
8745
8746         tp->rx_mode = RX_MODE_ENABLE;
8747         if (tg3_flag(tp, 5755_PLUS))
8748                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8749
8750         if (tg3_flag(tp, ENABLE_RSS))
8751                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8752                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8753                                RX_MODE_RSS_IPV6_HASH_EN |
8754                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8755                                RX_MODE_RSS_IPV4_HASH_EN |
8756                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8757
8758         tw32_f(MAC_RX_MODE, tp->rx_mode);
8759         udelay(10);
8760
8761         tw32(MAC_LED_CTRL, tp->led_ctrl);
8762
8763         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8764         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8765                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8766                 udelay(10);
8767         }
8768         tw32_f(MAC_RX_MODE, tp->rx_mode);
8769         udelay(10);
8770
8771         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8772                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8773                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8774                         /* Set drive transmission level to 1.2V  */
8775                         /* only if the signal pre-emphasis bit is not set  */
8776                         val = tr32(MAC_SERDES_CFG);
8777                         val &= 0xfffff000;
8778                         val |= 0x880;
8779                         tw32(MAC_SERDES_CFG, val);
8780                 }
8781                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8782                         tw32(MAC_SERDES_CFG, 0x616000);
8783         }
8784
8785         /* Prevent chip from dropping frames when flow control
8786          * is enabled.
8787          */
8788         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8789                 val = 1;
8790         else
8791                 val = 2;
8792         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8793
8794         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8795             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8796                 /* Use hardware link auto-negotiation */
8797                 tg3_flag_set(tp, HW_AUTONEG);
8798         }
8799
8800         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8801             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8802                 u32 tmp;
8803
8804                 tmp = tr32(SERDES_RX_CTRL);
8805                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8806                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8807                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8808                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8809         }
8810
8811         if (!tg3_flag(tp, USE_PHYLIB)) {
8812                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8813                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8814                         tp->link_config.speed = tp->link_config.orig_speed;
8815                         tp->link_config.duplex = tp->link_config.orig_duplex;
8816                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8817                 }
8818
8819                 err = tg3_setup_phy(tp, 0);
8820                 if (err)
8821                         return err;
8822
8823                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8824                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8825                         u32 tmp;
8826
8827                         /* Clear CRC stats. */
8828                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8829                                 tg3_writephy(tp, MII_TG3_TEST1,
8830                                              tmp | MII_TG3_TEST1_CRC_EN);
8831                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8832                         }
8833                 }
8834         }
8835
8836         __tg3_set_rx_mode(tp->dev);
8837
8838         /* Initialize receive rules. */
8839         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8840         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8841         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8842         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8843
8844         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8845                 limit = 8;
8846         else
8847                 limit = 16;
8848         if (tg3_flag(tp, ENABLE_ASF))
8849                 limit -= 4;
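        /* Each case below deliberately falls through to the next, so a
         * single switch clears rules limit-1 down to 4 (rules 3 and 2
         * are intentionally left commented out).
         */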
8850         switch (limit) {
8851         case 16:
8852                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8853         case 15:
8854                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8855         case 14:
8856                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8857         case 13:
8858                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8859         case 12:
8860                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8861         case 11:
8862                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8863         case 10:
8864                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8865         case 9:
8866                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8867         case 8:
8868                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8869         case 7:
8870                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8871         case 6:
8872                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8873         case 5:
8874                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8875         case 4:
8876                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8877         case 3:
8878                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8879         case 2:
8880         case 1:
8881
8882         default:
8883                 break;
8884         }
8885
8886         if (tg3_flag(tp, ENABLE_APE))
8887                 /* Write our heartbeat update interval to APE. */
8888                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8889                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8890
8891         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8892
8893         return 0;
8894 }
8895
8896 /* Called at device open time to get the chip ready for
8897  * packet processing.  Invoked with tp->lock held.
8898  */
8899 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8900 {
8901         tg3_switch_clocks(tp);
8902
8903         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8904
8905         return tg3_reset_hw(tp, reset_phy);
8906 }
8907
8908 #define TG3_STAT_ADD32(PSTAT, REG) \
8909 do {    u32 __val = tr32(REG); \
8910         (PSTAT)->low += __val; \
8911         if ((PSTAT)->low < __val) \
8912                 (PSTAT)->high += 1; \
8913 } while (0)
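/* TG3_STAT_ADD32() folds a 32-bit hardware counter into a 64-bit
 * software counter: unsigned wraparound of the low word after the add
 * is detected by the (low < __val) comparison and propagated as a
 * carry into the high word.
 */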
8914
8915 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8916 {
8917         struct tg3_hw_stats *sp = tp->hw_stats;
8918
8919         if (!netif_carrier_ok(tp->dev))
8920                 return;
8921
8922         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8923         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8924         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8925         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8926         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8927         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8928         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8929         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8930         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8931         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8932         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8933         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8934         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8935
8936         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8937         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8938         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8939         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8940         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8941         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8942         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8943         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8944         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8945         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8946         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8947         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8948         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8949         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8950
8951         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8952         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8953             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8954             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8955                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8956         } else {
8957                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8958                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8959                 if (val) {
8960                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8961                         sp->rx_discards.low += val;
8962                         if (sp->rx_discards.low < val)
8963                                 sp->rx_discards.high += 1;
8964                 }
8965                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8966         }
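        /* On the chips taking the else branch above, a latched mbuf
         * low-water attention is counted as a single rx discard instead
         * of reading RCVLPC_IN_DISCARDS_CNT, and the attention bit is
         * cleared so the next interval starts fresh.
         */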
8967         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8968 }
8969
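/* Work around occasionally missed MSIs: if a NAPI context has work
 * pending but neither its rx nor tx consumer index has advanced since
 * the previous timer tick, rewrite the interrupt mailbox with the last
 * status tag to re-trigger the interrupt.  chk_msi_cnt grants one tick
 * of grace before the mailbox is poked.
 */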
8970 static void tg3_chk_missed_msi(struct tg3 *tp)
8971 {
8972         u32 i;
8973
8974         for (i = 0; i < tp->irq_cnt; i++) {
8975                 struct tg3_napi *tnapi = &tp->napi[i];
8976
8977                 if (tg3_has_work(tnapi)) {
8978                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8979                             tnapi->last_tx_cons == tnapi->tx_cons) {
8980                                 if (tnapi->chk_msi_cnt < 1) {
8981                                         tnapi->chk_msi_cnt++;
8982                                         return;
8983                                 }
8984                                 tw32_mailbox(tnapi->int_mbox,
8985                                              tnapi->last_tag << 24);
8986                         }
8987                 }
8988                 tnapi->chk_msi_cnt = 0;
8989                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8990                 tnapi->last_tx_cons = tnapi->tx_cons;
8991         }
8992 }
8993
8994 static void tg3_timer(unsigned long __opaque)
8995 {
8996         struct tg3 *tp = (struct tg3 *) __opaque;
8997
8998         if (tp->irq_sync)
8999                 goto restart_timer;
9000
9001         spin_lock(&tp->lock);
9002
9003         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9004             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9005                 tg3_chk_missed_msi(tp);
9006
9007         if (!tg3_flag(tp, TAGGED_STATUS)) {
9008                 /* All of this garbage is because, when using non-tagged
9009                  * IRQ status, the mailbox/status_block protocol the chip
9010                  * uses with the cpu is race prone.
9011                  */
9012                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9013                         tw32(GRC_LOCAL_CTRL,
9014                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9015                 } else {
9016                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9017                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9018                 }
9019
9020                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9021                         tg3_flag_set(tp, RESTART_TIMER);
9022                         spin_unlock(&tp->lock);
9023                         schedule_work(&tp->reset_task);
9024                         return;
9025                 }
9026         }
9027
9028         /* This part only runs once per second. */
9029         if (!--tp->timer_counter) {
9030                 if (tg3_flag(tp, 5705_PLUS))
9031                         tg3_periodic_fetch_stats(tp);
9032
9033                 if (tp->setlpicnt && !--tp->setlpicnt)
9034                         tg3_phy_eee_enable(tp);
9035
9036                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9037                         u32 mac_stat;
9038                         int phy_event;
9039
9040                         mac_stat = tr32(MAC_STATUS);
9041
9042                         phy_event = 0;
9043                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9044                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9045                                         phy_event = 1;
9046                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9047                                 phy_event = 1;
9048
9049                         if (phy_event)
9050                                 tg3_setup_phy(tp, 0);
9051                 } else if (tg3_flag(tp, POLL_SERDES)) {
9052                         u32 mac_stat = tr32(MAC_STATUS);
9053                         int need_setup = 0;
9054
9055                         if (netif_carrier_ok(tp->dev) &&
9056                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9057                                 need_setup = 1;
9058                         }
9059                         if (!netif_carrier_ok(tp->dev) &&
9060                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9061                                          MAC_STATUS_SIGNAL_DET))) {
9062                                 need_setup = 1;
9063                         }
9064                         if (need_setup) {
9065                                 if (!tp->serdes_counter) {
9066                                         tw32_f(MAC_MODE,
9067                                              (tp->mac_mode &
9068                                               ~MAC_MODE_PORT_MODE_MASK));
9069                                         udelay(40);
9070                                         tw32_f(MAC_MODE, tp->mac_mode);
9071                                         udelay(40);
9072                                 }
9073                                 tg3_setup_phy(tp, 0);
9074                         }
9075                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9076                            tg3_flag(tp, 5780_CLASS)) {
9077                         tg3_serdes_parallel_detect(tp);
9078                 }
9079
9080                 tp->timer_counter = tp->timer_multiplier;
9081         }
9082
9083         /* Heartbeat is only sent once every 2 seconds.
9084          *
9085          * The heartbeat is to tell the ASF firmware that the host
9086          * driver is still alive.  In the event that the OS crashes,
9087          * ASF needs to reset the hardware to free up the FIFO space
9088          * that may be filled with rx packets destined for the host.
9089          * If the FIFO is full, ASF will no longer function properly.
9090          *
9091          * Unintended resets have been reported on real time kernels
9092          * where the timer doesn't run on time.  Netpoll will have the
9093          * same problem.
9094          *
9095          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9096          * to check the ring condition when the heartbeat is expiring
9097          * before doing the reset.  This will prevent most unintended
9098          * resets.
9099          */
9100         if (!--tp->asf_counter) {
9101                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9102                         tg3_wait_for_event_ack(tp);
9103
9104                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9105                                       FWCMD_NICDRV_ALIVE3);
9106                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9107                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9108                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9109
9110                         tg3_generate_fw_event(tp);
9111                 }
9112                 tp->asf_counter = tp->asf_multiplier;
9113         }
9114
9115         spin_unlock(&tp->lock);
9116
9117 restart_timer:
9118         tp->timer.expires = jiffies + tp->timer_offset;
9119         add_timer(&tp->timer);
9120 }
9121
9122 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9123 {
9124         irq_handler_t fn;
9125         unsigned long flags;
9126         char *name;
9127         struct tg3_napi *tnapi = &tp->napi[irq_num];
9128
9129         if (tp->irq_cnt == 1)
9130                 name = tp->dev->name;
9131         else {
9132                 name = &tnapi->irq_lbl[0];
9133                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9134                 name[IFNAMSIZ-1] = 0;
9135         }
9136
9137         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9138                 fn = tg3_msi;
9139                 if (tg3_flag(tp, 1SHOT_MSI))
9140                         fn = tg3_msi_1shot;
9141                 flags = 0;
9142         } else {
9143                 fn = tg3_interrupt;
9144                 if (tg3_flag(tp, TAGGED_STATUS))
9145                         fn = tg3_interrupt_tagged;
9146                 flags = IRQF_SHARED;
9147         }
9148
9149         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9150 }
9151
9152 static int tg3_test_interrupt(struct tg3 *tp)
9153 {
9154         struct tg3_napi *tnapi = &tp->napi[0];
9155         struct net_device *dev = tp->dev;
9156         int err, i, intr_ok = 0;
9157         u32 val;
9158
9159         if (!netif_running(dev))
9160                 return -ENODEV;
9161
9162         tg3_disable_ints(tp);
9163
9164         free_irq(tnapi->irq_vec, tnapi);
9165
9166         /*
9167          * Turn off MSI one shot mode.  Otherwise this test has no
9168          * way to observe whether the interrupt was delivered.
9169          */
9170         if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9171                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9172                 tw32(MSGINT_MODE, val);
9173         }
9174
9175         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9176                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9177         if (err)
9178                 return err;
9179
9180         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9181         tg3_enable_ints(tp);
9182
9183         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9184                tnapi->coal_now);
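        /* Writing tnapi->coal_now into HOSTCC_MODE forces the coalescing
         * engine to fire an interrupt for this vector right away; the
         * loop below then polls for evidence that it was delivered.
         */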
9185
9186         for (i = 0; i < 5; i++) {
9187                 u32 int_mbox, misc_host_ctrl;
9188
9189                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9190                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9191
9192                 if ((int_mbox != 0) ||
9193                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9194                         intr_ok = 1;
9195                         break;
9196                 }
9197
9198                 msleep(10);
9199         }
9200
9201         tg3_disable_ints(tp);
9202
9203         free_irq(tnapi->irq_vec, tnapi);
9204
9205         err = tg3_request_irq(tp, 0);
9206
9207         if (err)
9208                 return err;
9209
9210         if (intr_ok) {
9211                 /* Reenable MSI one shot mode. */
9212                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9213                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9214                         tw32(MSGINT_MODE, val);
9215                 }
9216                 return 0;
9217         }
9218
9219         return -EIO;
9220 }
9221
9222 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
9223  * INTx mode is successfully restored.
9224  */
9225 static int tg3_test_msi(struct tg3 *tp)
9226 {
9227         int err;
9228         u16 pci_cmd;
9229
9230         if (!tg3_flag(tp, USING_MSI))
9231                 return 0;
9232
9233         /* Turn off SERR reporting in case MSI terminates with Master
9234          * Abort.
9235          */
9236         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9237         pci_write_config_word(tp->pdev, PCI_COMMAND,
9238                               pci_cmd & ~PCI_COMMAND_SERR);
9239
9240         err = tg3_test_interrupt(tp);
9241
9242         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9243
9244         if (!err)
9245                 return 0;
9246
9247         /* other failures */
9248         if (err != -EIO)
9249                 return err;
9250
9251         /* MSI test failed, go back to INTx mode */
9252         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9253                     "to INTx mode. Please report this failure to the PCI "
9254                     "maintainer and include system chipset information\n");
9255
9256         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9257
9258         pci_disable_msi(tp->pdev);
9259
9260         tg3_flag_clear(tp, USING_MSI);
9261         tp->napi[0].irq_vec = tp->pdev->irq;
9262
9263         err = tg3_request_irq(tp, 0);
9264         if (err)
9265                 return err;
9266
9267         /* Need to reset the chip because the MSI cycle may have terminated
9268          * with Master Abort.
9269          */
9270         tg3_full_lock(tp, 1);
9271
9272         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9273         err = tg3_init_hw(tp, 1);
9274
9275         tg3_full_unlock(tp);
9276
9277         if (err)
9278                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9279
9280         return err;
9281 }
9282
9283 static int tg3_request_firmware(struct tg3 *tp)
9284 {
9285         const __be32 *fw_data;
9286
9287         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9288                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9289                            tp->fw_needed);
9290                 return -ENOENT;
9291         }
9292
9293         fw_data = (void *)tp->fw->data;
9294
9295         /* The firmware blob starts with version numbers, followed by
9296          * the start address and the _full_ length including BSS sections
9297          * (which must be longer than the actual data, of course).
9298          */
9299
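        /* Assumed header layout, in __be32 words:
         *   fw_data[0]: firmware version
         *   fw_data[1]: load address
         *   fw_data[2]: full image length, including BSS
         * Only fw_data[2] is consumed here, for the sanity check below.
         */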
9300         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9301         if (tp->fw_len < (tp->fw->size - 12)) {
9302                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9303                            tp->fw_len, tp->fw_needed);
9304                 release_firmware(tp->fw);
9305                 tp->fw = NULL;
9306                 return -EINVAL;
9307         }
9308
9309         /* We no longer need firmware; we have it. */
9310         tp->fw_needed = NULL;
9311         return 0;
9312 }
9313
9314 static bool tg3_enable_msix(struct tg3 *tp)
9315 {
9316         int i, rc, cpus = num_online_cpus();
9317         struct msix_entry msix_ent[tp->irq_max];
9318
9319         if (cpus == 1)
9320                 /* Just fallback to the simpler MSI mode. */
9321                 return false;
9322
9323         /*
9324          * We want as many rx rings enabled as there are cpus.
9325          * The first MSIX vector only deals with link interrupts, etc,
9326          * so we add one to the number of vectors we are requesting.
9327          */
9328         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
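        /* For example, with 4 online cpus and irq_max >= 5, five vectors
         * are requested: vector 0 for link/misc interrupts plus one per
         * rx ring.
         */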
9329
9330         for (i = 0; i < tp->irq_max; i++) {
9331                 msix_ent[i].entry  = i;
9332                 msix_ent[i].vector = 0;
9333         }
9334
9335         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9336         if (rc < 0) {
9337                 return false;
9338         } else if (rc != 0) {
9339                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9340                         return false;
9341                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9342                               tp->irq_cnt, rc);
9343                 tp->irq_cnt = rc;
9344         }
9345
9346         for (i = 0; i < tp->irq_max; i++)
9347                 tp->napi[i].irq_vec = msix_ent[i].vector;
9348
9349         netif_set_real_num_tx_queues(tp->dev, 1);
9350         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9351         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9352                 pci_disable_msix(tp->pdev);
9353                 return false;
9354         }
9355
9356         if (tp->irq_cnt > 1) {
9357                 tg3_flag_set(tp, ENABLE_RSS);
9358
9359                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9360                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9361                         tg3_flag_set(tp, ENABLE_TSS);
9362                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9363                 }
9364         }
9365
9366         return true;
9367 }
9368
9369 static void tg3_ints_init(struct tg3 *tp)
9370 {
9371         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9372             !tg3_flag(tp, TAGGED_STATUS)) {
9373                 /* All MSI supporting chips should support tagged
9374                  * status.  Complain loudly if this is not the case.
9375                  */
9376                 netdev_warn(tp->dev,
9377                             "MSI without TAGGED_STATUS? Not using MSI\n");
9378                 goto defcfg;
9379         }
9380
9381         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9382                 tg3_flag_set(tp, USING_MSIX);
9383         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9384                 tg3_flag_set(tp, USING_MSI);
9385
9386         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9387                 u32 msi_mode = tr32(MSGINT_MODE);
9388                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9389                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9390                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9391         }
9392 defcfg:
9393         if (!tg3_flag(tp, USING_MSIX)) {
9394                 tp->irq_cnt = 1;
9395                 tp->napi[0].irq_vec = tp->pdev->irq;
9396                 netif_set_real_num_tx_queues(tp->dev, 1);
9397                 netif_set_real_num_rx_queues(tp->dev, 1);
9398         }
9399 }
9400
9401 static void tg3_ints_fini(struct tg3 *tp)
9402 {
9403         if (tg3_flag(tp, USING_MSIX))
9404                 pci_disable_msix(tp->pdev);
9405         else if (tg3_flag(tp, USING_MSI))
9406                 pci_disable_msi(tp->pdev);
9407         tg3_flag_clear(tp, USING_MSI);
9408         tg3_flag_clear(tp, USING_MSIX);
9409         tg3_flag_clear(tp, ENABLE_RSS);
9410         tg3_flag_clear(tp, ENABLE_TSS);
9411 }
9412
9413 static int tg3_open(struct net_device *dev)
9414 {
9415         struct tg3 *tp = netdev_priv(dev);
9416         int i, err;
9417
9418         if (tp->fw_needed) {
9419                 err = tg3_request_firmware(tp);
9420                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9421                         if (err)
9422                                 return err;
9423                 } else if (err) {
9424                         netdev_warn(tp->dev, "TSO capability disabled\n");
9425                         tg3_flag_clear(tp, TSO_CAPABLE);
9426                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9427                         netdev_notice(tp->dev, "TSO capability restored\n");
9428                         tg3_flag_set(tp, TSO_CAPABLE);
9429                 }
9430         }
9431
9432         netif_carrier_off(tp->dev);
9433
9434         err = tg3_power_up(tp);
9435         if (err)
9436                 return err;
9437
9438         tg3_full_lock(tp, 0);
9439
9440         tg3_disable_ints(tp);
9441         tg3_flag_clear(tp, INIT_COMPLETE);
9442
9443         tg3_full_unlock(tp);
9444
9445         /*
9446          * Setup interrupts first so we know how
9447          * many NAPI resources to allocate
9448          */
9449         tg3_ints_init(tp);
9450
9451         /* The placement of this call is tied
9452          * to the setup and use of Host TX descriptors.
9453          */
9454         err = tg3_alloc_consistent(tp);
9455         if (err)
9456                 goto err_out1;
9457
9458         tg3_napi_init(tp);
9459
9460         tg3_napi_enable(tp);
9461
9462         for (i = 0; i < tp->irq_cnt; i++) {
9463                 struct tg3_napi *tnapi = &tp->napi[i];
9464                 err = tg3_request_irq(tp, i);
9465                 if (err) {
9466                         for (i--; i >= 0; i--)
9467                                 free_irq(tnapi->irq_vec, tnapi);
9468                         break;
9469                 }
9470         }
9471
9472         if (err)
9473                 goto err_out2;
9474
9475         tg3_full_lock(tp, 0);
9476
9477         err = tg3_init_hw(tp, 1);
9478         if (err) {
9479                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9480                 tg3_free_rings(tp);
9481         } else {
9482                 if (tg3_flag(tp, TAGGED_STATUS) &&
9483                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9484                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9485                         tp->timer_offset = HZ;
9486                 else
9487                         tp->timer_offset = HZ / 10;
9488
9489                 BUG_ON(tp->timer_offset > HZ);
9490                 tp->timer_counter = tp->timer_multiplier =
9491                         (HZ / tp->timer_offset);
9492                 tp->asf_counter = tp->asf_multiplier =
9493                         ((HZ / tp->timer_offset) * 2);
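                /* E.g. with timer_offset = HZ / 10 the timer fires ten
                 * times a second, timer_multiplier = 10 gates the
                 * once-per-second work in tg3_timer() to every tenth
                 * tick, and asf_multiplier = 20 paces the ASF heartbeat
                 * at once every 2 seconds.
                 */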
9494
9495                 init_timer(&tp->timer);
9496                 tp->timer.expires = jiffies + tp->timer_offset;
9497                 tp->timer.data = (unsigned long) tp;
9498                 tp->timer.function = tg3_timer;
9499         }
9500
9501         tg3_full_unlock(tp);
9502
9503         if (err)
9504                 goto err_out3;
9505
9506         if (tg3_flag(tp, USING_MSI)) {
9507                 err = tg3_test_msi(tp);
9508
9509                 if (err) {
9510                         tg3_full_lock(tp, 0);
9511                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9512                         tg3_free_rings(tp);
9513                         tg3_full_unlock(tp);
9514
9515                         goto err_out2;
9516                 }
9517
9518                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9519                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9520
9521                         tw32(PCIE_TRANSACTION_CFG,
9522                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9523                 }
9524         }
9525
9526         tg3_phy_start(tp);
9527
9528         tg3_full_lock(tp, 0);
9529
9530         add_timer(&tp->timer);
9531         tg3_flag_set(tp, INIT_COMPLETE);
9532         tg3_enable_ints(tp);
9533
9534         tg3_full_unlock(tp);
9535
9536         netif_tx_start_all_queues(dev);
9537
9538         /*
9539          * Reset the loopback feature if it was turned on while the device
9540          * was down, to make sure that it is reinstalled properly now.
9541          */
9542         if (dev->features & NETIF_F_LOOPBACK)
9543                 tg3_set_loopback(dev, dev->features);
9544
9545         return 0;
9546
9547 err_out3:
9548         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9549                 struct tg3_napi *tnapi = &tp->napi[i];
9550                 free_irq(tnapi->irq_vec, tnapi);
9551         }
9552
9553 err_out2:
9554         tg3_napi_disable(tp);
9555         tg3_napi_fini(tp);
9556         tg3_free_consistent(tp);
9557
9558 err_out1:
9559         tg3_ints_fini(tp);
9560         tg3_frob_aux_power(tp, false);
9561         pci_set_power_state(tp->pdev, PCI_D3hot);
9562         return err;
9563 }
9564
9565 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9566                                                  struct rtnl_link_stats64 *);
9567 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9568
9569 static int tg3_close(struct net_device *dev)
9570 {
9571         int i;
9572         struct tg3 *tp = netdev_priv(dev);
9573
9574         tg3_napi_disable(tp);
9575         cancel_work_sync(&tp->reset_task);
9576
9577         netif_tx_stop_all_queues(dev);
9578
9579         del_timer_sync(&tp->timer);
9580
9581         tg3_phy_stop(tp);
9582
9583         tg3_full_lock(tp, 1);
9584
9585         tg3_disable_ints(tp);
9586
9587         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9588         tg3_free_rings(tp);
9589         tg3_flag_clear(tp, INIT_COMPLETE);
9590
9591         tg3_full_unlock(tp);
9592
9593         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9594                 struct tg3_napi *tnapi = &tp->napi[i];
9595                 free_irq(tnapi->irq_vec, tnapi);
9596         }
9597
9598         tg3_ints_fini(tp);
9599
9600         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9601
9602         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9603                sizeof(tp->estats_prev));
9604
9605         tg3_napi_fini(tp);
9606
9607         tg3_free_consistent(tp);
9608
9609         tg3_power_down(tp);
9610
9611         netif_carrier_off(tp->dev);
9612
9613         return 0;
9614 }
9615
9616 static inline u64 get_stat64(tg3_stat64_t *val)
9617 {
9618        return ((u64)val->high << 32) | ((u64)val->low);
9619 }
9620
9621 static u64 calc_crc_errors(struct tg3 *tp)
9622 {
9623         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9624
9625         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9626             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9627              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9628                 u32 val;
9629
9630                 spin_lock_bh(&tp->lock);
9631                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9632                         tg3_writephy(tp, MII_TG3_TEST1,
9633                                      val | MII_TG3_TEST1_CRC_EN);
9634                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9635                 } else
9636                         val = 0;
9637                 spin_unlock_bh(&tp->lock);
9638
9639                 tp->phy_crc_errors += val;
9640
9641                 return tp->phy_crc_errors;
9642         }
9643
9644         return get_stat64(&hw_stats->rx_fcs_errors);
9645 }
9646
9647 #define ESTAT_ADD(member) \
9648         estats->member =        old_estats->member + \
9649                                 get_stat64(&hw_stats->member)
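/* ESTAT_ADD() reports the pre-reset total (estats_prev, saved at close
 * time) plus whatever the hardware has accumulated since the last
 * reset, so ethtool statistics survive down/up cycles.
 */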
9650
9651 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9652 {
9653         struct tg3_ethtool_stats *estats = &tp->estats;
9654         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9655         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9656
9657         if (!hw_stats)
9658                 return old_estats;
9659
9660         ESTAT_ADD(rx_octets);
9661         ESTAT_ADD(rx_fragments);
9662         ESTAT_ADD(rx_ucast_packets);
9663         ESTAT_ADD(rx_mcast_packets);
9664         ESTAT_ADD(rx_bcast_packets);
9665         ESTAT_ADD(rx_fcs_errors);
9666         ESTAT_ADD(rx_align_errors);
9667         ESTAT_ADD(rx_xon_pause_rcvd);
9668         ESTAT_ADD(rx_xoff_pause_rcvd);
9669         ESTAT_ADD(rx_mac_ctrl_rcvd);
9670         ESTAT_ADD(rx_xoff_entered);
9671         ESTAT_ADD(rx_frame_too_long_errors);
9672         ESTAT_ADD(rx_jabbers);
9673         ESTAT_ADD(rx_undersize_packets);
9674         ESTAT_ADD(rx_in_length_errors);
9675         ESTAT_ADD(rx_out_length_errors);
9676         ESTAT_ADD(rx_64_or_less_octet_packets);
9677         ESTAT_ADD(rx_65_to_127_octet_packets);
9678         ESTAT_ADD(rx_128_to_255_octet_packets);
9679         ESTAT_ADD(rx_256_to_511_octet_packets);
9680         ESTAT_ADD(rx_512_to_1023_octet_packets);
9681         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9682         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9683         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9684         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9685         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9686
9687         ESTAT_ADD(tx_octets);
9688         ESTAT_ADD(tx_collisions);
9689         ESTAT_ADD(tx_xon_sent);
9690         ESTAT_ADD(tx_xoff_sent);
9691         ESTAT_ADD(tx_flow_control);
9692         ESTAT_ADD(tx_mac_errors);
9693         ESTAT_ADD(tx_single_collisions);
9694         ESTAT_ADD(tx_mult_collisions);
9695         ESTAT_ADD(tx_deferred);
9696         ESTAT_ADD(tx_excessive_collisions);
9697         ESTAT_ADD(tx_late_collisions);
9698         ESTAT_ADD(tx_collide_2times);
9699         ESTAT_ADD(tx_collide_3times);
9700         ESTAT_ADD(tx_collide_4times);
9701         ESTAT_ADD(tx_collide_5times);
9702         ESTAT_ADD(tx_collide_6times);
9703         ESTAT_ADD(tx_collide_7times);
9704         ESTAT_ADD(tx_collide_8times);
9705         ESTAT_ADD(tx_collide_9times);
9706         ESTAT_ADD(tx_collide_10times);
9707         ESTAT_ADD(tx_collide_11times);
9708         ESTAT_ADD(tx_collide_12times);
9709         ESTAT_ADD(tx_collide_13times);
9710         ESTAT_ADD(tx_collide_14times);
9711         ESTAT_ADD(tx_collide_15times);
9712         ESTAT_ADD(tx_ucast_packets);
9713         ESTAT_ADD(tx_mcast_packets);
9714         ESTAT_ADD(tx_bcast_packets);
9715         ESTAT_ADD(tx_carrier_sense_errors);
9716         ESTAT_ADD(tx_discards);
9717         ESTAT_ADD(tx_errors);
9718
9719         ESTAT_ADD(dma_writeq_full);
9720         ESTAT_ADD(dma_write_prioq_full);
9721         ESTAT_ADD(rxbds_empty);
9722         ESTAT_ADD(rx_discards);
9723         ESTAT_ADD(rx_errors);
9724         ESTAT_ADD(rx_threshold_hit);
9725
9726         ESTAT_ADD(dma_readq_full);
9727         ESTAT_ADD(dma_read_prioq_full);
9728         ESTAT_ADD(tx_comp_queue_full);
9729
9730         ESTAT_ADD(ring_set_send_prod_index);
9731         ESTAT_ADD(ring_status_update);
9732         ESTAT_ADD(nic_irqs);
9733         ESTAT_ADD(nic_avoided_irqs);
9734         ESTAT_ADD(nic_tx_threshold_hit);
9735
9736         ESTAT_ADD(mbuf_lwm_thresh_hit);
9737
9738         return estats;
9739 }
9740
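/* tp->net_stats_prev holds the totals accumulated up to the last chip
 * reset; the hardware counters restart from zero after a reset, so each
 * field below is reported as the saved total plus the live counter.
 */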
9741 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9742                                                  struct rtnl_link_stats64 *stats)
9743 {
9744         struct tg3 *tp = netdev_priv(dev);
9745         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9746         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9747
9748         if (!hw_stats)
9749                 return old_stats;
9750
9751         stats->rx_packets = old_stats->rx_packets +
9752                 get_stat64(&hw_stats->rx_ucast_packets) +
9753                 get_stat64(&hw_stats->rx_mcast_packets) +
9754                 get_stat64(&hw_stats->rx_bcast_packets);
9755
9756         stats->tx_packets = old_stats->tx_packets +
9757                 get_stat64(&hw_stats->tx_ucast_packets) +
9758                 get_stat64(&hw_stats->tx_mcast_packets) +
9759                 get_stat64(&hw_stats->tx_bcast_packets);
9760
9761         stats->rx_bytes = old_stats->rx_bytes +
9762                 get_stat64(&hw_stats->rx_octets);
9763         stats->tx_bytes = old_stats->tx_bytes +
9764                 get_stat64(&hw_stats->tx_octets);
9765
9766         stats->rx_errors = old_stats->rx_errors +
9767                 get_stat64(&hw_stats->rx_errors);
9768         stats->tx_errors = old_stats->tx_errors +
9769                 get_stat64(&hw_stats->tx_errors) +
9770                 get_stat64(&hw_stats->tx_mac_errors) +
9771                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9772                 get_stat64(&hw_stats->tx_discards);
9773
9774         stats->multicast = old_stats->multicast +
9775                 get_stat64(&hw_stats->rx_mcast_packets);
9776         stats->collisions = old_stats->collisions +
9777                 get_stat64(&hw_stats->tx_collisions);
9778
9779         stats->rx_length_errors = old_stats->rx_length_errors +
9780                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9781                 get_stat64(&hw_stats->rx_undersize_packets);
9782
9783         stats->rx_over_errors = old_stats->rx_over_errors +
9784                 get_stat64(&hw_stats->rxbds_empty);
9785         stats->rx_frame_errors = old_stats->rx_frame_errors +
9786                 get_stat64(&hw_stats->rx_align_errors);
9787         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9788                 get_stat64(&hw_stats->tx_discards);
9789         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9790                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9791
9792         stats->rx_crc_errors = old_stats->rx_crc_errors +
9793                 calc_crc_errors(tp);
9794
9795         stats->rx_missed_errors = old_stats->rx_missed_errors +
9796                 get_stat64(&hw_stats->rx_discards);
9797
9798         stats->rx_dropped = tp->rx_dropped;
9799
9800         return stats;
9801 }
9802
9803 static inline u32 calc_crc(unsigned char *buf, int len)
9804 {
9805         u32 reg;
9806         u32 tmp;
9807         int j, k;
9808
9809         reg = 0xffffffff;
9810
9811         for (j = 0; j < len; j++) {
9812                 reg ^= buf[j];
9813
9814                 for (k = 0; k < 8; k++) {
9815                         tmp = reg & 0x01;
9816
9817                         reg >>= 1;
9818
9819                         if (tmp)
9820                                 reg ^= 0xedb88320;
9821                 }
9822         }
9823
9824         return ~reg;
9825 }
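
/* calc_crc() is the standard bit-reflected Ethernet CRC-32 (polynomial
 * 0xedb88320, the reversed form of 0x04c11db7).  As a minimal sketch,
 * assuming <linux/crc32.h> is available, it is equivalent to:
 *
 *	u32 crc = ~crc32_le(~0, buf, len);
 *
 * It is presumably open coded here to avoid a dependency on the kernel
 * CRC32 library.
 */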
9826
9827 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9828 {
9829         /* accept or reject all multicast frames */
9830         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9831         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9832         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9833         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9834 }
9835
9836 static void __tg3_set_rx_mode(struct net_device *dev)
9837 {
9838         struct tg3 *tp = netdev_priv(dev);
9839         u32 rx_mode;
9840
9841         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9842                                   RX_MODE_KEEP_VLAN_TAG);
9843
9844 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9845         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9846          * flag clear.
9847          */
9848         if (!tg3_flag(tp, ENABLE_ASF))
9849                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9850 #endif
9851
9852         if (dev->flags & IFF_PROMISC) {
9853                 /* Promiscuous mode. */
9854                 rx_mode |= RX_MODE_PROMISC;
9855         } else if (dev->flags & IFF_ALLMULTI) {
9856                 /* Accept all multicast. */
9857                 tg3_set_multi(tp, 1);
9858         } else if (netdev_mc_empty(dev)) {
9859                 /* Reject all multicast. */
9860                 tg3_set_multi(tp, 0);
9861         } else {
9862                 /* Accept one or more multicast(s). */
9863                 struct netdev_hw_addr *ha;
9864                 u32 mc_filter[4] = { 0, };
9865                 u32 regidx;
9866                 u32 bit;
9867                 u32 crc;
9868
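                /* The MAC's multicast filter is a 128-bit hash split
                 * across four 32-bit registers.  The low 7 bits of the
                 * complemented CRC-32 of the address pick one of the
                 * 128 filter bits: bits 6:5 select the register and
                 * bits 4:0 the bit within it.  For example, a value of
                 * 0x6b sets bit 11 of MAC_HASH_REG_3.
                 */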
9869                 netdev_for_each_mc_addr(ha, dev) {
9870                         crc = calc_crc(ha->addr, ETH_ALEN);
9871                         bit = ~crc & 0x7f;
9872                         regidx = (bit & 0x60) >> 5;
9873                         bit &= 0x1f;
9874                         mc_filter[regidx] |= (1 << bit);
9875                 }
9876
9877                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9878                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9879                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9880                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9881         }
9882
9883         if (rx_mode != tp->rx_mode) {
9884                 tp->rx_mode = rx_mode;
9885                 tw32_f(MAC_RX_MODE, rx_mode);
9886                 udelay(10);
9887         }
9888 }
9889
9890 static void tg3_set_rx_mode(struct net_device *dev)
9891 {
9892         struct tg3 *tp = netdev_priv(dev);
9893
9894         if (!netif_running(dev))
9895                 return;
9896
9897         tg3_full_lock(tp, 0);
9898         __tg3_set_rx_mode(dev);
9899         tg3_full_unlock(tp);
9900 }
9901
9902 static int tg3_get_regs_len(struct net_device *dev)
9903 {
9904         return TG3_REG_BLK_SIZE;
9905 }
9906
9907 static void tg3_get_regs(struct net_device *dev,
9908                 struct ethtool_regs *regs, void *_p)
9909 {
9910         struct tg3 *tp = netdev_priv(dev);
9911
9912         regs->version = 0;
9913
9914         memset(_p, 0, TG3_REG_BLK_SIZE);
9915
9916         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9917                 return;
9918
9919         tg3_full_lock(tp, 0);
9920
9921         tg3_dump_legacy_regs(tp, (u32 *)_p);
9922
9923         tg3_full_unlock(tp);
9924 }
9925
9926 static int tg3_get_eeprom_len(struct net_device *dev)
9927 {
9928         struct tg3 *tp = netdev_priv(dev);
9929
9930         return tp->nvram_size;
9931 }
9932
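/* NVRAM reads are performed in 32-bit units, so a request is split into
 * an unaligned head, whole words, and an unaligned tail.  For example,
 * offset=5 len=10 reads the word at 4 and copies 3 bytes, reads the
 * word at 8 whole, then reads the word at 12 and copies 3 bytes.
 */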
9933 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9934 {
9935         struct tg3 *tp = netdev_priv(dev);
9936         int ret;
9937         u8  *pd;
9938         u32 i, offset, len, b_offset, b_count;
9939         __be32 val;
9940
9941         if (tg3_flag(tp, NO_NVRAM))
9942                 return -EINVAL;
9943
9944         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9945                 return -EAGAIN;
9946
9947         offset = eeprom->offset;
9948         len = eeprom->len;
9949         eeprom->len = 0;
9950
9951         eeprom->magic = TG3_EEPROM_MAGIC;
9952
9953         if (offset & 3) {
9954                 /* adjustments to start on required 4 byte boundary */
9955                 b_offset = offset & 3;
9956                 b_count = 4 - b_offset;
9957                 if (b_count > len) {
9958                         /* i.e. offset=1 len=2 */
9959                         b_count = len;
9960                 }
9961                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9962                 if (ret)
9963                         return ret;
9964                 memcpy(data, ((char *)&val) + b_offset, b_count);
9965                 len -= b_count;
9966                 offset += b_count;
9967                 eeprom->len += b_count;
9968         }
9969
9970         /* read bytes up to the last 4 byte boundary */
9971         pd = &data[eeprom->len];
9972         for (i = 0; i < (len - (len & 3)); i += 4) {
9973                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9974                 if (ret) {
9975                         eeprom->len += i;
9976                         return ret;
9977                 }
9978                 memcpy(pd + i, &val, 4);
9979         }
9980         eeprom->len += i;
9981
9982         if (len & 3) {
9983                 /* read last bytes not ending on 4 byte boundary */
9984                 pd = &data[eeprom->len];
9985                 b_count = len & 3;
9986                 b_offset = offset + len - b_count;
9987                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9988                 if (ret)
9989                         return ret;
9990                 memcpy(pd, &val, b_count);
9991                 eeprom->len += b_count;
9992         }
9993         return 0;
9994 }
9995
9996 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9997
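/* Writes are likewise done in whole 32-bit words, so unaligned requests
 * become a read-modify-write.  For example, offset=6 len=3 reads the
 * words at 4 and 8, splices the 3 caller bytes in at offset 2 of a
 * bounce buffer, and writes 8 bytes back at offset 4.
 */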
9998 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9999 {
10000         struct tg3 *tp = netdev_priv(dev);
10001         int ret;
10002         u32 offset, len, b_offset, odd_len;
10003         u8 *buf;
10004         __be32 start, end;
10005
10006         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10007                 return -EAGAIN;
10008
10009         if (tg3_flag(tp, NO_NVRAM) ||
10010             eeprom->magic != TG3_EEPROM_MAGIC)
10011                 return -EINVAL;
10012
10013         offset = eeprom->offset;
10014         len = eeprom->len;
10015
10016         if ((b_offset = (offset & 3))) {
10017                 /* adjustments to start on required 4 byte boundary */
10018                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10019                 if (ret)
10020                         return ret;
10021                 len += b_offset;
10022                 offset &= ~3;
10023                 if (len < 4)
10024                         len = 4;
10025         }
10026
10027         odd_len = 0;
10028         if (len & 3) {
10029                 /* adjustments to end on required 4 byte boundary */
10030                 odd_len = 1;
10031                 len = (len + 3) & ~3;
10032                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10033                 if (ret)
10034                         return ret;
10035         }
10036
10037         buf = data;
10038         if (b_offset || odd_len) {
10039                 buf = kmalloc(len, GFP_KERNEL);
10040                 if (!buf)
10041                         return -ENOMEM;
10042                 if (b_offset)
10043                         memcpy(buf, &start, 4);
10044                 if (odd_len)
10045                         memcpy(buf+len-4, &end, 4);
10046                 memcpy(buf + b_offset, data, eeprom->len);
10047         }
10048
10049         ret = tg3_nvram_write_block(tp, offset, len, buf);
10050
10051         if (buf != data)
10052                 kfree(buf);
10053
10054         return ret;
10055 }
10056
10057 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10058 {
10059         struct tg3 *tp = netdev_priv(dev);
10060
10061         if (tg3_flag(tp, USE_PHYLIB)) {
10062                 struct phy_device *phydev;
10063                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10064                         return -EAGAIN;
10065                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10066                 return phy_ethtool_gset(phydev, cmd);
10067         }
10068
10069         cmd->supported = (SUPPORTED_Autoneg);
10070
10071         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10072                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10073                                    SUPPORTED_1000baseT_Full);
10074
10075         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10076                 cmd->supported |= (SUPPORTED_100baseT_Half |
10077                                    SUPPORTED_100baseT_Full |
10078                                    SUPPORTED_10baseT_Half |
10079                                    SUPPORTED_10baseT_Full |
10080                                    SUPPORTED_TP);
10081                 cmd->port = PORT_TP;
10082         } else {
10083                 cmd->supported |= SUPPORTED_FIBRE;
10084                 cmd->port = PORT_FIBRE;
10085         }
10086
10087         cmd->advertising = tp->link_config.advertising;
10088         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10089                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10090                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10091                                 cmd->advertising |= ADVERTISED_Pause;
10092                         } else {
10093                                 cmd->advertising |= ADVERTISED_Pause |
10094                                                     ADVERTISED_Asym_Pause;
10095                         }
10096                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10097                         cmd->advertising |= ADVERTISED_Asym_Pause;
10098                 }
10099         }
10100         if (netif_running(dev)) {
10101                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10102                 cmd->duplex = tp->link_config.active_duplex;
10103         } else {
10104                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10105                 cmd->duplex = DUPLEX_INVALID;
10106         }
10107         cmd->phy_address = tp->phy_addr;
10108         cmd->transceiver = XCVR_INTERNAL;
10109         cmd->autoneg = tp->link_config.autoneg;
10110         cmd->maxtxpkt = 0;
10111         cmd->maxrxpkt = 0;
10112         return 0;
10113 }
10114
10115 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10116 {
10117         struct tg3 *tp = netdev_priv(dev);
10118         u32 speed = ethtool_cmd_speed(cmd);
10119
10120         if (tg3_flag(tp, USE_PHYLIB)) {
10121                 struct phy_device *phydev;
10122                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10123                         return -EAGAIN;
10124                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10125                 return phy_ethtool_sset(phydev, cmd);
10126         }
10127
10128         if (cmd->autoneg != AUTONEG_ENABLE &&
10129             cmd->autoneg != AUTONEG_DISABLE)
10130                 return -EINVAL;
10131
10132         if (cmd->autoneg == AUTONEG_DISABLE &&
10133             cmd->duplex != DUPLEX_FULL &&
10134             cmd->duplex != DUPLEX_HALF)
10135                 return -EINVAL;
10136
10137         if (cmd->autoneg == AUTONEG_ENABLE) {
10138                 u32 mask = ADVERTISED_Autoneg |
10139                            ADVERTISED_Pause |
10140                            ADVERTISED_Asym_Pause;
10141
10142                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10143                         mask |= ADVERTISED_1000baseT_Half |
10144                                 ADVERTISED_1000baseT_Full;
10145
10146                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10147                         mask |= ADVERTISED_100baseT_Half |
10148                                 ADVERTISED_100baseT_Full |
10149                                 ADVERTISED_10baseT_Half |
10150                                 ADVERTISED_10baseT_Full |
10151                                 ADVERTISED_TP;
10152                 else
10153                         mask |= ADVERTISED_FIBRE;
10154
10155                 if (cmd->advertising & ~mask)
10156                         return -EINVAL;
10157
10158                 mask &= (ADVERTISED_1000baseT_Half |
10159                          ADVERTISED_1000baseT_Full |
10160                          ADVERTISED_100baseT_Half |
10161                          ADVERTISED_100baseT_Full |
10162                          ADVERTISED_10baseT_Half |
10163                          ADVERTISED_10baseT_Full);
10164
10165                 cmd->advertising &= mask;
10166         } else {
10167                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10168                         if (speed != SPEED_1000)
10169                                 return -EINVAL;
10170
10171                         if (cmd->duplex != DUPLEX_FULL)
10172                                 return -EINVAL;
10173                 } else {
10174                         if (speed != SPEED_100 &&
10175                             speed != SPEED_10)
10176                                 return -EINVAL;
10177                 }
10178         }
10179
10180         tg3_full_lock(tp, 0);
10181
10182         tp->link_config.autoneg = cmd->autoneg;
10183         if (cmd->autoneg == AUTONEG_ENABLE) {
10184                 tp->link_config.advertising = (cmd->advertising |
10185                                               ADVERTISED_Autoneg);
10186                 tp->link_config.speed = SPEED_INVALID;
10187                 tp->link_config.duplex = DUPLEX_INVALID;
10188         } else {
10189                 tp->link_config.advertising = 0;
10190                 tp->link_config.speed = speed;
10191                 tp->link_config.duplex = cmd->duplex;
10192         }
10193
10194         tp->link_config.orig_speed = tp->link_config.speed;
10195         tp->link_config.orig_duplex = tp->link_config.duplex;
10196         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10197
10198         if (netif_running(dev))
10199                 tg3_setup_phy(tp, 1);
10200
10201         tg3_full_unlock(tp);
10202
10203         return 0;
10204 }
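
/* For reference, the get/set_settings paths above are what e.g.
 *
 *	ethtool eth0
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 *
 * exercise from user space (device name chosen for illustration).
 */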
10205
10206 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10207 {
10208         struct tg3 *tp = netdev_priv(dev);
10209
10210         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10211         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10212         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10213         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10214 }
10215
10216 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10217 {
10218         struct tg3 *tp = netdev_priv(dev);
10219
10220         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10221                 wol->supported = WAKE_MAGIC;
10222         else
10223                 wol->supported = 0;
10224         wol->wolopts = 0;
10225         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10226                 wol->wolopts = WAKE_MAGIC;
10227         memset(&wol->sopass, 0, sizeof(wol->sopass));
10228 }
10229
10230 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10231 {
10232         struct tg3 *tp = netdev_priv(dev);
10233         struct device *dp = &tp->pdev->dev;
10234
10235         if (wol->wolopts & ~WAKE_MAGIC)
10236                 return -EINVAL;
10237         if ((wol->wolopts & WAKE_MAGIC) &&
10238             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10239                 return -EINVAL;
10240
10241         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10242
10243         spin_lock_bh(&tp->lock);
10244         if (device_may_wakeup(dp))
10245                 tg3_flag_set(tp, WOL_ENABLE);
10246         else
10247                 tg3_flag_clear(tp, WOL_ENABLE);
10248         spin_unlock_bh(&tp->lock);
10249
10250         return 0;
10251 }
10252
10253 static u32 tg3_get_msglevel(struct net_device *dev)
10254 {
10255         struct tg3 *tp = netdev_priv(dev);
10256         return tp->msg_enable;
10257 }
10258
10259 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10260 {
10261         struct tg3 *tp = netdev_priv(dev);
10262         tp->msg_enable = value;
10263 }
10264
10265 static int tg3_nway_reset(struct net_device *dev)
10266 {
10267         struct tg3 *tp = netdev_priv(dev);
10268         int r;
10269
10270         if (!netif_running(dev))
10271                 return -EAGAIN;
10272
10273         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10274                 return -EINVAL;
10275
10276         if (tg3_flag(tp, USE_PHYLIB)) {
10277                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10278                         return -EAGAIN;
10279                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10280         } else {
10281                 u32 bmcr;
10282
10283                 spin_lock_bh(&tp->lock);
10284                 r = -EINVAL;
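                /* The first read is discarded; some PHYs apparently
                 * return stale BMCR contents on the first access, so
                 * only the second read below is checked.
                 */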
10285                 tg3_readphy(tp, MII_BMCR, &bmcr);
10286                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10287                     ((bmcr & BMCR_ANENABLE) ||
10288                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10289                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10290                                                    BMCR_ANENABLE);
10291                         r = 0;
10292                 }
10293                 spin_unlock_bh(&tp->lock);
10294         }
10295
10296         return r;
10297 }
10298
10299 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10300 {
10301         struct tg3 *tp = netdev_priv(dev);
10302
10303         ering->rx_max_pending = tp->rx_std_ring_mask;
10304         ering->rx_mini_max_pending = 0;
10305         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10306                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10307         else
10308                 ering->rx_jumbo_max_pending = 0;
10309
10310         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10311
10312         ering->rx_pending = tp->rx_pending;
10313         ering->rx_mini_pending = 0;
10314         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10315                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10316         else
10317                 ering->rx_jumbo_pending = 0;
10318
10319         ering->tx_pending = tp->napi[0].tx_pending;
10320 }
10321
10322 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10323 {
10324         struct tg3 *tp = netdev_priv(dev);
10325         int i, irq_sync = 0, err = 0;
10326
10327         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10328             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10329             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10330             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10331             (tg3_flag(tp, TSO_BUG) &&
10332              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10333                 return -EINVAL;
10334
10335         if (netif_running(dev)) {
10336                 tg3_phy_stop(tp);
10337                 tg3_netif_stop(tp);
10338                 irq_sync = 1;
10339         }
10340
10341         tg3_full_lock(tp, irq_sync);
10342
10343         tp->rx_pending = ering->rx_pending;
10344
10345         if (tg3_flag(tp, MAX_RXPEND_64) &&
10346             tp->rx_pending > 63)
10347                 tp->rx_pending = 63;
10348         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10349
10350         for (i = 0; i < tp->irq_max; i++)
10351                 tp->napi[i].tx_pending = ering->tx_pending;
10352
10353         if (netif_running(dev)) {
10354                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10355                 err = tg3_restart_hw(tp, 1);
10356                 if (!err)
10357                         tg3_netif_start(tp);
10358         }
10359
10360         tg3_full_unlock(tp);
10361
10362         if (irq_sync && !err)
10363                 tg3_phy_start(tp);
10364
10365         return err;
10366 }
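
/* For reference, this path is exercised by e.g.
 *
 *	ethtool -G eth0 rx 511 tx 511
 *
 * (device name and counts for illustration only).  Note that tx must
 * exceed MAX_SKB_FRAGS (times 3 on TSO_BUG chips) per the checks above.
 */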
10367
10368 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10369 {
10370         struct tg3 *tp = netdev_priv(dev);
10371
10372         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10373
10374         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10375                 epause->rx_pause = 1;
10376         else
10377                 epause->rx_pause = 0;
10378
10379         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10380                 epause->tx_pause = 1;
10381         else
10382                 epause->tx_pause = 0;
10383 }
10384
10385 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10386 {
10387         struct tg3 *tp = netdev_priv(dev);
10388         int err = 0;
10389
10390         if (tg3_flag(tp, USE_PHYLIB)) {
10391                 u32 newadv;
10392                 struct phy_device *phydev;
10393
10394                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10395
10396                 if (!(phydev->supported & SUPPORTED_Pause) ||
10397                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10398                      (epause->rx_pause != epause->tx_pause)))
10399                         return -EINVAL;
10400
10401                 tp->link_config.flowctrl = 0;
10402                 if (epause->rx_pause) {
10403                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10404
10405                         if (epause->tx_pause) {
10406                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10407                                 newadv = ADVERTISED_Pause;
10408                         } else
10409                                 newadv = ADVERTISED_Pause |
10410                                          ADVERTISED_Asym_Pause;
10411                 } else if (epause->tx_pause) {
10412                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10413                         newadv = ADVERTISED_Asym_Pause;
10414                 } else
10415                         newadv = 0;
10416
10417                 if (epause->autoneg)
10418                         tg3_flag_set(tp, PAUSE_AUTONEG);
10419                 else
10420                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10421
10422                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10423                         u32 oldadv = phydev->advertising &
10424                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10425                         if (oldadv != newadv) {
10426                                 phydev->advertising &=
10427                                         ~(ADVERTISED_Pause |
10428                                           ADVERTISED_Asym_Pause);
10429                                 phydev->advertising |= newadv;
10430                                 if (phydev->autoneg) {
10431                                         /*
10432                                          * Always renegotiate the link to
10433                                          * inform our link partner of our
10434                                          * flow control settings, even if the
10435                                          * flow control is forced.  Let
10436                                          * tg3_adjust_link() do the final
10437                                          * flow control setup.
10438                                          */
10439                                         return phy_start_aneg(phydev);
10440                                 }
10441                         }
10442
10443                         if (!epause->autoneg)
10444                                 tg3_setup_flow_control(tp, 0, 0);
10445                 } else {
10446                         tp->link_config.orig_advertising &=
10447                                         ~(ADVERTISED_Pause |
10448                                           ADVERTISED_Asym_Pause);
10449                         tp->link_config.orig_advertising |= newadv;
10450                 }
10451         } else {
10452                 int irq_sync = 0;
10453
10454                 if (netif_running(dev)) {
10455                         tg3_netif_stop(tp);
10456                         irq_sync = 1;
10457                 }
10458
10459                 tg3_full_lock(tp, irq_sync);
10460
10461                 if (epause->autoneg)
10462                         tg3_flag_set(tp, PAUSE_AUTONEG);
10463                 else
10464                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10465                 if (epause->rx_pause)
10466                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10467                 else
10468                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10469                 if (epause->tx_pause)
10470                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10471                 else
10472                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10473
10474                 if (netif_running(dev)) {
10475                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10476                         err = tg3_restart_hw(tp, 1);
10477                         if (!err)
10478                                 tg3_netif_start(tp);
10479                 }
10480
10481                 tg3_full_unlock(tp);
10482         }
10483
10484         return err;
10485 }
10486
10487 static int tg3_get_sset_count(struct net_device *dev, int sset)
10488 {
10489         switch (sset) {
10490         case ETH_SS_TEST:
10491                 return TG3_NUM_TEST;
10492         case ETH_SS_STATS:
10493                 return TG3_NUM_STATS;
10494         default:
10495                 return -EOPNOTSUPP;
10496         }
10497 }
10498
10499 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10500 {
10501         switch (stringset) {
10502         case ETH_SS_STATS:
10503                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10504                 break;
10505         case ETH_SS_TEST:
10506                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10507                 break;
10508         default:
10509                 WARN_ON(1);     /* unknown stringset */
10510                 break;
10511         }
10512 }
10513
10514 static int tg3_set_phys_id(struct net_device *dev,
10515                             enum ethtool_phys_id_state state)
10516 {
10517         struct tg3 *tp = netdev_priv(dev);
10518
10519         if (!netif_running(tp->dev))
10520                 return -EAGAIN;
10521
10522         switch (state) {
10523         case ETHTOOL_ID_ACTIVE:
10524                 return 1;       /* cycle on/off once per second */
10525
10526         case ETHTOOL_ID_ON:
10527                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10528                      LED_CTRL_1000MBPS_ON |
10529                      LED_CTRL_100MBPS_ON |
10530                      LED_CTRL_10MBPS_ON |
10531                      LED_CTRL_TRAFFIC_OVERRIDE |
10532                      LED_CTRL_TRAFFIC_BLINK |
10533                      LED_CTRL_TRAFFIC_LED);
10534                 break;
10535
10536         case ETHTOOL_ID_OFF:
10537                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10538                      LED_CTRL_TRAFFIC_OVERRIDE);
10539                 break;
10540
10541         case ETHTOOL_ID_INACTIVE:
10542                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10543                 break;
10544         }
10545
10546         return 0;
10547 }
10548
10549 static void tg3_get_ethtool_stats(struct net_device *dev,
10550                                    struct ethtool_stats *estats, u64 *tmp_stats)
10551 {
10552         struct tg3 *tp = netdev_priv(dev);
10553         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10554 }
10555
10556 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10557 {
10558         int i;
10559         __be32 *buf;
10560         u32 offset = 0, len = 0;
10561         u32 magic, val;
10562
10563         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10564                 return NULL;
10565
10566         if (magic == TG3_EEPROM_MAGIC) {
10567                 for (offset = TG3_NVM_DIR_START;
10568                      offset < TG3_NVM_DIR_END;
10569                      offset += TG3_NVM_DIRENT_SIZE) {
10570                         if (tg3_nvram_read(tp, offset, &val))
10571                                 return NULL;
10572
10573                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10574                             TG3_NVM_DIRTYPE_EXTVPD)
10575                                 break;
10576                 }
10577
10578                 if (offset != TG3_NVM_DIR_END) {
10579                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10580                         if (tg3_nvram_read(tp, offset + 4, &offset))
10581                                 return NULL;
10582
10583                         offset = tg3_nvram_logical_addr(tp, offset);
10584                 }
10585         }
10586
10587         if (!offset || !len) {
10588                 offset = TG3_NVM_VPD_OFF;
10589                 len = TG3_NVM_VPD_LEN;
10590         }
10591
10592         buf = kmalloc(len, GFP_KERNEL);
10593         if (buf == NULL)
10594                 return NULL;
10595
10596         if (magic == TG3_EEPROM_MAGIC) {
10597                 for (i = 0; i < len; i += 4) {
10598                         /* The data is in little-endian format in NVRAM.
10599                          * Use the big-endian read routines to preserve
10600                          * the byte order as it exists in NVRAM.
10601                          */
10602                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10603                                 goto error;
10604                 }
10605         } else {
10606                 u8 *ptr;
10607                 ssize_t cnt;
10608                 unsigned int pos = 0;
10609
10610                 ptr = (u8 *)&buf[0];
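                /* pci_read_vpd() may return a short read; -ETIMEDOUT
                 * and -EINTR are treated as zero progress and retried.
                 * Give up if the block is incomplete after three passes.
                 */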
10611                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10612                         cnt = pci_read_vpd(tp->pdev, pos,
10613                                            len - pos, ptr);
10614                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10615                                 cnt = 0;
10616                         else if (cnt < 0)
10617                                 goto error;
10618                 }
10619                 if (pos != len)
10620                         goto error;
10621         }
10622
10623         return buf;
10624
10625 error:
10626         kfree(buf);
10627         return NULL;
10628 }
10629
10630 #define NVRAM_TEST_SIZE 0x100
10631 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10632 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10633 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10634 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10635 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10636 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x4c
10637 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10638 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10639
10640 static int tg3_test_nvram(struct tg3 *tp)
10641 {
10642         u32 csum, magic;
10643         __be32 *buf;
10644         int i, j, k, err = 0, size;
10645
10646         if (tg3_flag(tp, NO_NVRAM))
10647                 return 0;
10648
10649         if (tg3_nvram_read(tp, 0, &magic) != 0)
10650                 return -EIO;
10651
10652         if (magic == TG3_EEPROM_MAGIC)
10653                 size = NVRAM_TEST_SIZE;
10654         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10655                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10656                     TG3_EEPROM_SB_FORMAT_1) {
10657                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10658                         case TG3_EEPROM_SB_REVISION_0:
10659                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10660                                 break;
10661                         case TG3_EEPROM_SB_REVISION_2:
10662                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10663                                 break;
10664                         case TG3_EEPROM_SB_REVISION_3:
10665                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10666                                 break;
10667                         case TG3_EEPROM_SB_REVISION_4:
10668                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10669                                 break;
10670                         case TG3_EEPROM_SB_REVISION_5:
10671                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10672                                 break;
10673                         case TG3_EEPROM_SB_REVISION_6:
10674                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10675                                 break;
10676                         default:
10677                                 return -EIO;
10678                         }
10679                 } else
10680                         return 0;
10681         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10682                 size = NVRAM_SELFBOOT_HW_SIZE;
10683         else
10684                 return -EIO;
10685
10686         buf = kmalloc(size, GFP_KERNEL);
10687         if (buf == NULL)
10688                 return -ENOMEM;
10689
10690         err = -EIO;
10691         for (i = 0, j = 0; i < size; i += 4, j++) {
10692                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10693                 if (err)
10694                         break;
10695         }
10696         if (i < size)
10697                 goto out;
10698
10699         /* Selfboot format */
10700         magic = be32_to_cpu(buf[0]);
10701         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10702             TG3_EEPROM_MAGIC_FW) {
10703                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10704
10705                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10706                     TG3_EEPROM_SB_REVISION_2) {
10707                         /* For rev 2, the csum doesn't include the MBA. */
10708                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10709                                 csum8 += buf8[i];
10710                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10711                                 csum8 += buf8[i];
10712                 } else {
10713                         for (i = 0; i < size; i++)
10714                                 csum8 += buf8[i];
10715                 }
10716
10717                 if (csum8 == 0) {
10718                         err = 0;
10719                         goto out;
10720                 }
10721
10722                 err = -EIO;
10723                 goto out;
10724         }
10725
10726         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10727             TG3_EEPROM_MAGIC_HW) {
10728                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10729                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10730                 u8 *buf8 = (u8 *) buf;
10731
10732                 /* Separate the parity bits and the data bytes.  */
10733                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10734                         if ((i == 0) || (i == 8)) {
10735                                 int l;
10736                                 u8 msk;
10737
10738                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10739                                         parity[k++] = buf8[i] & msk;
10740                                 i++;
10741                         } else if (i == 16) {
10742                                 int l;
10743                                 u8 msk;
10744
10745                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10746                                         parity[k++] = buf8[i] & msk;
10747                                 i++;
10748
10749                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10750                                         parity[k++] = buf8[i] & msk;
10751                                 i++;
10752                         }
10753                         data[j++] = buf8[i];
10754                 }
10755
10756                 err = -EIO;
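                /* Each data byte together with its parity bit must have
                 * odd total weight: an even-weight byte needs the parity
                 * bit set, an odd-weight byte needs it clear.
                 */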
10757                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10758                         u8 hw8 = hweight8(data[i]);
10759
10760                         if ((hw8 & 0x1) && parity[i])
10761                                 goto out;
10762                         else if (!(hw8 & 0x1) && !parity[i])
10763                                 goto out;
10764                 }
10765                 err = 0;
10766                 goto out;
10767         }
10768
10769         err = -EIO;
10770
10771         /* Bootstrap checksum at offset 0x10 */
10772         csum = calc_crc((unsigned char *) buf, 0x10);
10773         if (csum != le32_to_cpu(buf[0x10/4]))
10774                 goto out;
10775
10776         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10777         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10778         if (csum != le32_to_cpu(buf[0xfc/4]))
10779                 goto out;
10780
10781         kfree(buf);
10782
10783         buf = tg3_vpd_readblock(tp);
10784         if (!buf)
10785                 return -ENOMEM;
10786
10787         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10788                              PCI_VPD_LRDT_RO_DATA);
10789         if (i > 0) {
10790                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10791                 if (j < 0)
10792                         goto out;
10793
10794                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10795                         goto out;
10796
10797                 i += PCI_VPD_LRDT_TAG_SIZE;
10798                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10799                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10800                 if (j > 0) {
10801                         u8 csum8 = 0;
10802
10803                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10804
10805                         for (i = 0; i <= j; i++)
10806                                 csum8 += ((u8 *)buf)[i];
10807
10808                         if (csum8)
10809                                 goto out;
10810                 }
10811         }
10812
10813         err = 0;
10814
10815 out:
10816         kfree(buf);
10817         return err;
10818 }
10819
10820 #define TG3_SERDES_TIMEOUT_SEC  2
10821 #define TG3_COPPER_TIMEOUT_SEC  6
10822
10823 static int tg3_test_link(struct tg3 *tp)
10824 {
10825         int i, max;
10826
10827         if (!netif_running(tp->dev))
10828                 return -ENODEV;
10829
10830         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10831                 max = TG3_SERDES_TIMEOUT_SEC;
10832         else
10833                 max = TG3_COPPER_TIMEOUT_SEC;
10834
10835         for (i = 0; i < max; i++) {
10836                 if (netif_carrier_ok(tp->dev))
10837                         return 0;
10838
10839                 if (msleep_interruptible(1000))
10840                         break;
10841         }
10842
10843         return -EIO;
10844 }
10845
10846 /* Only test the commonly used registers */
10847 static int tg3_test_registers(struct tg3 *tp)
10848 {
10849         int i, is_5705, is_5750;
10850         u32 offset, read_mask, write_mask, val, save_val, read_val;
10851         static struct {
10852                 u16 offset;
10853                 u16 flags;
10854 #define TG3_FL_5705     0x1
10855 #define TG3_FL_NOT_5705 0x2
10856 #define TG3_FL_NOT_5788 0x4
10857 #define TG3_FL_NOT_5750 0x8
10858                 u32 read_mask;
10859                 u32 write_mask;
10860         } reg_tbl[] = {
10861                 /* MAC Control Registers */
10862                 { MAC_MODE, TG3_FL_NOT_5705,
10863                         0x00000000, 0x00ef6f8c },
10864                 { MAC_MODE, TG3_FL_5705,
10865                         0x00000000, 0x01ef6b8c },
10866                 { MAC_STATUS, TG3_FL_NOT_5705,
10867                         0x03800107, 0x00000000 },
10868                 { MAC_STATUS, TG3_FL_5705,
10869                         0x03800100, 0x00000000 },
10870                 { MAC_ADDR_0_HIGH, 0x0000,
10871                         0x00000000, 0x0000ffff },
10872                 { MAC_ADDR_0_LOW, 0x0000,
10873                         0x00000000, 0xffffffff },
10874                 { MAC_RX_MTU_SIZE, 0x0000,
10875                         0x00000000, 0x0000ffff },
10876                 { MAC_TX_MODE, 0x0000,
10877                         0x00000000, 0x00000070 },
10878                 { MAC_TX_LENGTHS, 0x0000,
10879                         0x00000000, 0x00003fff },
10880                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10881                         0x00000000, 0x000007fc },
10882                 { MAC_RX_MODE, TG3_FL_5705,
10883                         0x00000000, 0x000007dc },
10884                 { MAC_HASH_REG_0, 0x0000,
10885                         0x00000000, 0xffffffff },
10886                 { MAC_HASH_REG_1, 0x0000,
10887                         0x00000000, 0xffffffff },
10888                 { MAC_HASH_REG_2, 0x0000,
10889                         0x00000000, 0xffffffff },
10890                 { MAC_HASH_REG_3, 0x0000,
10891                         0x00000000, 0xffffffff },
10892
10893                 /* Receive Data and Receive BD Initiator Control Registers. */
10894                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10895                         0x00000000, 0xffffffff },
10896                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10897                         0x00000000, 0xffffffff },
10898                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10899                         0x00000000, 0x00000003 },
10900                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10901                         0x00000000, 0xffffffff },
10902                 { RCVDBDI_STD_BD+0, 0x0000,
10903                         0x00000000, 0xffffffff },
10904                 { RCVDBDI_STD_BD+4, 0x0000,
10905                         0x00000000, 0xffffffff },
10906                 { RCVDBDI_STD_BD+8, 0x0000,
10907                         0x00000000, 0xffff0002 },
10908                 { RCVDBDI_STD_BD+0xc, 0x0000,
10909                         0x00000000, 0xffffffff },
10910
10911                 /* Receive BD Initiator Control Registers. */
10912                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10913                         0x00000000, 0xffffffff },
10914                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10915                         0x00000000, 0x000003ff },
10916                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10917                         0x00000000, 0xffffffff },
10918
10919                 /* Host Coalescing Control Registers. */
10920                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10921                         0x00000000, 0x00000004 },
10922                 { HOSTCC_MODE, TG3_FL_5705,
10923                         0x00000000, 0x000000f6 },
10924                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10925                         0x00000000, 0xffffffff },
10926                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10927                         0x00000000, 0x000003ff },
10928                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10929                         0x00000000, 0xffffffff },
10930                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10931                         0x00000000, 0x000003ff },
10932                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10933                         0x00000000, 0xffffffff },
10934                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10935                         0x00000000, 0x000000ff },
10936                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10937                         0x00000000, 0xffffffff },
10938                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10939                         0x00000000, 0x000000ff },
10940                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10941                         0x00000000, 0xffffffff },
10942                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10943                         0x00000000, 0xffffffff },
10944                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10945                         0x00000000, 0xffffffff },
10946                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10947                         0x00000000, 0x000000ff },
10948                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10949                         0x00000000, 0xffffffff },
10950                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10951                         0x00000000, 0x000000ff },
10952                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10953                         0x00000000, 0xffffffff },
10954                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10955                         0x00000000, 0xffffffff },
10956                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10957                         0x00000000, 0xffffffff },
10958                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10959                         0x00000000, 0xffffffff },
10960                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10961                         0x00000000, 0xffffffff },
10962                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10963                         0xffffffff, 0x00000000 },
10964                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10965                         0xffffffff, 0x00000000 },
10966
10967                 /* Buffer Manager Control Registers. */
10968                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10969                         0x00000000, 0x007fff80 },
10970                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10971                         0x00000000, 0x007fffff },
10972                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10973                         0x00000000, 0x0000003f },
10974                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10975                         0x00000000, 0x000001ff },
10976                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10977                         0x00000000, 0x000001ff },
10978                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10979                         0xffffffff, 0x00000000 },
10980                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10981                         0xffffffff, 0x00000000 },
10982
10983                 /* Mailbox Registers */
10984                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10985                         0x00000000, 0x000001ff },
10986                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10987                         0x00000000, 0x000001ff },
10988                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10989                         0x00000000, 0x000007ff },
10990                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10991                         0x00000000, 0x000001ff },
10992
10993                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10994         };
10995
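        /* read_mask marks read-only bits that must keep their saved
         * value across both test writes; write_mask marks bits that
         * must take both 0 and 1.  For example, { MAC_ADDR_0_HIGH,
         * 0x0000, 0x00000000, 0x0000ffff } requires the low 16 bits to
         * accept whatever is written, while { MAC_STATUS, TG3_FL_5705,
         * 0x03800100, 0x00000000 } only checks that those status bits
         * are unchanged.
         */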
10996         is_5705 = is_5750 = 0;
10997         if (tg3_flag(tp, 5705_PLUS)) {
10998                 is_5705 = 1;
10999                 if (tg3_flag(tp, 5750_PLUS))
11000                         is_5750 = 1;
11001         }
11002
11003         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11004                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11005                         continue;
11006
11007                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11008                         continue;
11009
11010                 if (tg3_flag(tp, IS_5788) &&
11011                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11012                         continue;
11013
11014                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11015                         continue;
11016
11017                 offset = (u32) reg_tbl[i].offset;
11018                 read_mask = reg_tbl[i].read_mask;
11019                 write_mask = reg_tbl[i].write_mask;
11020
11021                 /* Save the original register content */
11022                 save_val = tr32(offset);
11023
11024                 /* Determine the read-only value. */
11025                 read_val = save_val & read_mask;
11026
11027                 /* Write zero to the register, then make sure the read-only bits
11028                  * are not changed and the read/write bits are all zeros.
11029                  */
11030                 tw32(offset, 0);
11031
11032                 val = tr32(offset);
11033
11034                 /* Test the read-only and read/write bits. */
11035                 if (((val & read_mask) != read_val) || (val & write_mask))
11036                         goto out;
11037
11038                 /* Write ones to all the bits defined by RdMask and WrMask, then
11039                  * make sure the read-only bits are not changed and the
11040                  * read/write bits are all ones.
11041                  */
11042                 tw32(offset, read_mask | write_mask);
11043
11044                 val = tr32(offset);
11045
11046                 /* Test the read-only bits. */
11047                 if ((val & read_mask) != read_val)
11048                         goto out;
11049
11050                 /* Test the read/write bits. */
11051                 if ((val & write_mask) != write_mask)
11052                         goto out;
11053
11054                 tw32(offset, save_val);
11055         }
11056
11057         return 0;
11058
11059 out:
11060         if (netif_msg_hw(tp))
11061                 netdev_err(tp->dev,
11062                            "Register test failed at offset %x\n", offset);
11063         tw32(offset, save_val);
11064         return -EIO;
11065 }
11066
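/* Walk a memory range with three patterns: all zeros and all ones catch
 * stuck-at faults, while the mixed 0xaa55a55a pattern is presumably
 * there to catch shorted or coupled adjacent bits.
 */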
11067 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11068 {
11069         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11070         int i;
11071         u32 j;
11072
11073         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11074                 for (j = 0; j < len; j += 4) {
11075                         u32 val;
11076
11077                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11078                         tg3_read_mem(tp, offset + j, &val);
11079                         if (val != test_pattern[i])
11080                                 return -EIO;
11081                 }
11082         }
11083         return 0;
11084 }
11085
11086 static int tg3_test_memory(struct tg3 *tp)
11087 {
11088         static struct mem_entry {
11089                 u32 offset;
11090                 u32 len;
11091         } mem_tbl_570x[] = {
11092                 { 0x00000000, 0x00b50},
11093                 { 0x00002000, 0x1c000},
11094                 { 0xffffffff, 0x00000}
11095         }, mem_tbl_5705[] = {
11096                 { 0x00000100, 0x0000c},
11097                 { 0x00000200, 0x00008},
11098                 { 0x00004000, 0x00800},
11099                 { 0x00006000, 0x01000},
11100                 { 0x00008000, 0x02000},
11101                 { 0x00010000, 0x0e000},
11102                 { 0xffffffff, 0x00000}
11103         }, mem_tbl_5755[] = {
11104                 { 0x00000200, 0x00008},
11105                 { 0x00004000, 0x00800},
11106                 { 0x00006000, 0x00800},
11107                 { 0x00008000, 0x02000},
11108                 { 0x00010000, 0x0c000},
11109                 { 0xffffffff, 0x00000}
11110         }, mem_tbl_5906[] = {
11111                 { 0x00000200, 0x00008},
11112                 { 0x00004000, 0x00400},
11113                 { 0x00006000, 0x00400},
11114                 { 0x00008000, 0x01000},
11115                 { 0x00010000, 0x01000},
11116                 { 0xffffffff, 0x00000}
11117         }, mem_tbl_5717[] = {
11118                 { 0x00000200, 0x00008},
11119                 { 0x00010000, 0x0a000},
11120                 { 0x00020000, 0x13c00},
11121                 { 0xffffffff, 0x00000}
11122         }, mem_tbl_57765[] = {
11123                 { 0x00000200, 0x00008},
11124                 { 0x00004000, 0x00800},
11125                 { 0x00006000, 0x09800},
11126                 { 0x00010000, 0x0a000},
11127                 { 0xffffffff, 0x00000}
11128         };
11129         const struct mem_entry *mem_tbl;
11130         int err = 0;
11131         int i;
11132
11133         if (tg3_flag(tp, 5717_PLUS))
11134                 mem_tbl = mem_tbl_5717;
11135         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11136                 mem_tbl = mem_tbl_57765;
11137         else if (tg3_flag(tp, 5755_PLUS))
11138                 mem_tbl = mem_tbl_5755;
11139         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11140                 mem_tbl = mem_tbl_5906;
11141         else if (tg3_flag(tp, 5705_PLUS))
11142                 mem_tbl = mem_tbl_5705;
11143         else
11144                 mem_tbl = mem_tbl_570x;
11145
11146         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11147                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11148                 if (err)
11149                         break;
11150         }
11151
11152         return err;
11153 }
11154
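      /* Loopback test variants: TG3_MAC_LOOPBACK wraps frames internally
       * in the MAC, TG3_PHY_LOOPBACK wraps them at the PHY via
       * BMCR_LOOPBACK, and TG3_TSO_LOOPBACK runs the PHY loopback path
       * with a canned TSO frame to exercise the segmentation hardware.
       */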
11155 #define TG3_MAC_LOOPBACK        0
11156 #define TG3_PHY_LOOPBACK        1
11157 #define TG3_TSO_LOOPBACK        2
11158
11159 #define TG3_TSO_MSS             500
11160
11161 #define TG3_TSO_IP_HDR_LEN      20
11162 #define TG3_TSO_TCP_HDR_LEN     20
11163 #define TG3_TSO_TCP_OPT_LEN     12
11164
11165 static const u8 tg3_tso_header[] = {
11166 0x08, 0x00,
11167 0x45, 0x00, 0x00, 0x00,
11168 0x00, 0x00, 0x40, 0x00,
11169 0x40, 0x06, 0x00, 0x00,
11170 0x0a, 0x00, 0x00, 0x01,
11171 0x0a, 0x00, 0x00, 0x02,
11172 0x0d, 0x00, 0xe0, 0x00,
11173 0x00, 0x00, 0x01, 0x00,
11174 0x00, 0x00, 0x02, 0x00,
11175 0x80, 0x10, 0x10, 0x00,
11176 0x14, 0x09, 0x00, 0x00,
11177 0x01, 0x01, 0x08, 0x0a,
11178 0x11, 0x11, 0x11, 0x11,
11179 0x11, 0x11, 0x11, 0x11,
11180 };
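      /* The canned header above is an IPv4 EtherType followed by a 20-byte
       * IP header (protocol TCP, 10.0.0.1 -> 10.0.0.2) and a 20-byte TCP
       * header carrying 12 bytes of timestamp options, matching the
       * TG3_TSO_*_LEN constants above.
       */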
11181
11182 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11183 {
11184         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11185         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11186         struct sk_buff *skb, *rx_skb;
11187         u8 *tx_data;
11188         dma_addr_t map;
11189         int num_pkts, tx_len, rx_len, i, err;
11190         struct tg3_rx_buffer_desc *desc;
11191         struct tg3_napi *tnapi, *rnapi;
11192         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11193
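      /* Default both tx and rx completions to vector 0; with RSS or TSS
       * enabled the first rx or tx ring lives on vector 1 instead.
       */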
11194         tnapi = &tp->napi[0];
11195         rnapi = &tp->napi[0];
11196         if (tp->irq_cnt > 1) {
11197                 if (tg3_flag(tp, ENABLE_RSS))
11198                         rnapi = &tp->napi[1];
11199                 if (tg3_flag(tp, ENABLE_TSS))
11200                         tnapi = &tp->napi[1];
11201         }
11202         coal_now = tnapi->coal_now | rnapi->coal_now;
11203
11204         if (loopback_mode == TG3_MAC_LOOPBACK) {
11205                 /* HW errata - mac loopback fails in some cases on 5780.
11206                  * Normal traffic and PHY loopback are not affected by the
11207                  * errata.  Also, the MAC loopback test is deprecated for
11208                  * all newer ASIC revisions.
11209                  */
11210                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11211                     tg3_flag(tp, CPMU_PRESENT))
11212                         return 0;
11213
11214                 mac_mode = tp->mac_mode &
11215                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11216                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11217                 if (!tg3_flag(tp, 5705_PLUS))
11218                         mac_mode |= MAC_MODE_LINK_POLARITY;
11219                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11220                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11221                 else
11222                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11223                 tw32(MAC_MODE, mac_mode);
11224         } else {
11225                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11226                         tg3_phy_fet_toggle_apd(tp, false);
11227                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11228                 } else {
11229                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
                      }
11230
11231                 tg3_phy_toggle_automdix(tp, 0);
11232
11233                 tg3_writephy(tp, MII_BMCR, val);
11234                 udelay(40);
11235
11236                 mac_mode = tp->mac_mode &
11237                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11238                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11239                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11240                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11241                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11242                         /* The write needs to be flushed for the AC131 */
11243                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11244                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11245                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11246                 } else {
11247                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
                      }
11248
11249                 /* Reset the rx MAC to avoid intermittently losing the first rx packet. */
11250                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11251                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11252                         udelay(10);
11253                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11254                 }
11255                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11256                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11257                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11258                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11259                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11260                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11261                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11262                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11263                 }
11264                 tw32(MAC_MODE, mac_mode);
11265
11266                 /* Wait for link */
11267                 for (i = 0; i < 100; i++) {
11268                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11269                                 break;
11270                         mdelay(1);
11271                 }
11272         }
11273
11274         err = -EIO;
11275
11276         tx_len = pktsz;
11277         skb = netdev_alloc_skb(tp->dev, tx_len);
11278         if (!skb)
11279                 return -ENOMEM;
11280
11281         tx_data = skb_put(skb, tx_len);
11282         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
11283         memset(tx_data + ETH_ALEN, 0x0, 8);
11284
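      /* Open the MAC rx MTU check wide enough for the test frame plus FCS. */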
11285         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11286
11287         if (loopback_mode == TG3_TSO_LOOPBACK) {
11288                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11289
11290                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11291                               TG3_TSO_TCP_OPT_LEN;
11292
11293                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11294                        sizeof(tg3_tso_header));
11295                 mss = TG3_TSO_MSS;
11296
11297                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11298                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11299
11300                 /* Set the total length field in the IP header */
11301                 iph->tot_len = htons((u16)(mss + hdr_len));
11302
11303                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11304                               TXD_FLAG_CPU_POST_DMA);
11305
11306                 if (tg3_flag(tp, HW_TSO_1) ||
11307                     tg3_flag(tp, HW_TSO_2) ||
11308                     tg3_flag(tp, HW_TSO_3)) {
11309                         struct tcphdr *th;
11310                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11311                         th = (struct tcphdr *)&tx_data[val];
11312                         th->check = 0;
11313                 } else {
11314                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
                      }
11315
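      /* Each hardware TSO generation encodes the pre-payload header
       * length differently: HW_TSO_3 splits it between the mss field
       * and base_flags, HW_TSO_2 packs it into the upper mss bits, and
       * older parts take only the TCP option length.
       */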
11316                 if (tg3_flag(tp, HW_TSO_3)) {
11317                         mss |= (hdr_len & 0xc) << 12;
11318                         if (hdr_len & 0x10)
11319                                 base_flags |= 0x00000010;
11320                         base_flags |= (hdr_len & 0x3e0) << 5;
11321                 } else if (tg3_flag(tp, HW_TSO_2)) {
11322                         mss |= hdr_len << 9;
11323                 } else if (tg3_flag(tp, HW_TSO_1) ||
11324                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11325                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11326                 } else {
11327                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11328                 }
11329
11330                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11331         } else {
11332                 num_pkts = 1;
11333                 data_off = ETH_HLEN;
11334         }
11335
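      /* Fill the payload with a predictable incrementing byte pattern
       * so the frame can be verified byte-for-byte on the receive side.
       */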
11336         for (i = data_off; i < tx_len; i++)
11337                 tx_data[i] = (u8) (i & 0xff);
11338
11339         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11340         if (pci_dma_mapping_error(tp->pdev, map)) {
11341                 dev_kfree_skb(skb);
11342                 return -EIO;
11343         }
11344
11345         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11346                rnapi->coal_now);
11347
11348         udelay(10);
11349
11350         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11351
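      /* Queue one descriptor; the low bit of the last argument marks
       * the end of the packet and the remaining bits carry the MSS.
       */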
11352         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11353                     base_flags, (mss << 1) | 1);
11354
11355         tnapi->tx_prod++;
11356
11357         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11358         tr32_mailbox(tnapi->prodmbox);
11359
11360         udelay(10);
11361
11362         /* Poll up to 350 usec to allow enough time on some 10/100 Mbps devices. */
11363         for (i = 0; i < 35; i++) {
11364                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11365                        coal_now);
11366
11367                 udelay(10);
11368
11369                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11370                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11371                 if ((tx_idx == tnapi->tx_prod) &&
11372                     (rx_idx == (rx_start_idx + num_pkts)))
11373                         break;
11374         }
11375
11376         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11377         dev_kfree_skb(skb);
11378
11379         if (tx_idx != tnapi->tx_prod)
11380                 goto out;
11381
11382         if (rx_idx != rx_start_idx + num_pkts)
11383                 goto out;
11384
11385         val = data_off;
11386         while (rx_idx != rx_start_idx) {
11387                 desc = &rnapi->rx_rcb[rx_start_idx++];
11388                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11389                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11390
11391                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11392                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11393                         goto out;
11394
11395                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11396                          - ETH_FCS_LEN;
11397
11398                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11399                         if (rx_len != tx_len)
11400                                 goto out;
11401
11402                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11403                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11404                                         goto out;
11405                         } else {
11406                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11407                                         goto out;
11408                         }
11409                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11410                            ((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) >>
11411                             RXD_TCPCSUM_SHIFT) != 0xffff) {
11412                         goto out;
11413                 }
11414
11415                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11416                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11417                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11418                                              mapping);
11419                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11420                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11421                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11422                                              mapping);
11423                 } else {
11424                         goto out;
                      }
11425
11426                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11427                                             PCI_DMA_FROMDEVICE);
11428
11429                 for (i = data_off; i < rx_len; i++, val++) {
11430                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11431                                 goto out;
11432                 }
11433         }
11434
11435         err = 0;
11436
11437         /* tg3_free_rings will unmap and free the rx_skb */
11438 out:
11439         return err;
11440 }
11441
11442 #define TG3_STD_LOOPBACK_FAILED         1
11443 #define TG3_JMB_LOOPBACK_FAILED         2
11444 #define TG3_TSO_LOOPBACK_FAILED         4
11445
11446 #define TG3_MAC_LOOPBACK_SHIFT          0
11447 #define TG3_PHY_LOOPBACK_SHIFT          4
11448 #define TG3_LOOPBACK_FAILED             0x00000077
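      /* The loopback result is a bitmap: bits 0-2 report the standard,
       * jumbo, and TSO runs in MAC loopback mode, bits 4-6 the same runs
       * in PHY loopback mode, so 0x77 marks every test as failed.
       */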
11449
11450 static int tg3_test_loopback(struct tg3 *tp)
11451 {
11452         int err = 0;
11453         u32 eee_cap, cpmuctrl = 0;
11454
11455         if (!netif_running(tp->dev))
11456                 return TG3_LOOPBACK_FAILED;
11457
11458         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11459         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11460
11461         err = tg3_reset_hw(tp, 1);
11462         if (err) {
11463                 err = TG3_LOOPBACK_FAILED;
11464                 goto done;
11465         }
11466
11467         if (tg3_flag(tp, ENABLE_RSS)) {
11468                 int i;
11469
11470                 /* Reroute all rx packets to the 1st queue */
11471                 for (i = MAC_RSS_INDIR_TBL_0;
11472                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11473                         tw32(i, 0x0);
11474         }
11475
11476         /* Turn off gphy autopowerdown. */
11477         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11478                 tg3_phy_toggle_apd(tp, false);
11479
11480         if (tg3_flag(tp, CPMU_PRESENT)) {
11481                 int i;
11482                 u32 status;
11483
11484                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11485
11486                 /* Wait for up to 40 microseconds to acquire lock. */
11487                 for (i = 0; i < 4; i++) {
11488                         status = tr32(TG3_CPMU_MUTEX_GNT);
11489                         if (status == CPMU_MUTEX_GNT_DRIVER)
11490                                 break;
11491                         udelay(10);
11492                 }
11493
11494                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11495                         err = TG3_LOOPBACK_FAILED;
11496                         goto done;
11497                 }
11498
11499                 /* Turn off link-based power management. */
11500                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11501                 tw32(TG3_CPMU_CTRL,
11502                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11503                                   CPMU_CTRL_LINK_AWARE_MODE));
11504         }
11505
11506         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11507                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11508
11509         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11510             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11511                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11512
11513         if (tg3_flag(tp, CPMU_PRESENT)) {
11514                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11515
11516                 /* Release the mutex */
11517                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11518         }
11519
11520         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11521             !tg3_flag(tp, USE_PHYLIB)) {
11522                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11523                         err |= TG3_STD_LOOPBACK_FAILED <<
11524                                TG3_PHY_LOOPBACK_SHIFT;
11525                 if (tg3_flag(tp, TSO_CAPABLE) &&
11526                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11527                         err |= TG3_TSO_LOOPBACK_FAILED <<
11528                                TG3_PHY_LOOPBACK_SHIFT;
11529                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11530                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11531                         err |= TG3_JMB_LOOPBACK_FAILED <<
11532                                TG3_PHY_LOOPBACK_SHIFT;
11533         }
11534
11535         /* Re-enable gphy autopowerdown. */
11536         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11537                 tg3_phy_toggle_apd(tp, true);
11538
11539 done:
11540         tp->phy_flags |= eee_cap;
11541
11542         return err;
11543 }
11544
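      /* ethtool self-test entry point.  Results are reported in fixed
       * slots: data[0] nvram, data[1] link, data[2] registers, data[3]
       * memory, data[4] the loopback bitmap, and data[5] interrupt.
       */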
11545 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11546                           u64 *data)
11547 {
11548         struct tg3 *tp = netdev_priv(dev);
11549
11550         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11551             tg3_power_up(tp)) {
11552                 etest->flags |= ETH_TEST_FL_FAILED;
11553                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11554                 return;
11555         }
11556
11557         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11558
11559         if (tg3_test_nvram(tp) != 0) {
11560                 etest->flags |= ETH_TEST_FL_FAILED;
11561                 data[0] = 1;
11562         }
11563         if (tg3_test_link(tp) != 0) {
11564                 etest->flags |= ETH_TEST_FL_FAILED;
11565                 data[1] = 1;
11566         }
11567         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11568                 int err, err2 = 0, irq_sync = 0;
11569
11570                 if (netif_running(dev)) {
11571                         tg3_phy_stop(tp);
11572                         tg3_netif_stop(tp);
11573                         irq_sync = 1;
11574                 }
11575
11576                 tg3_full_lock(tp, irq_sync);
11577
11578                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11579                 err = tg3_nvram_lock(tp);
11580                 tg3_halt_cpu(tp, RX_CPU_BASE);
11581                 if (!tg3_flag(tp, 5705_PLUS))
11582                         tg3_halt_cpu(tp, TX_CPU_BASE);
11583                 if (!err)
11584                         tg3_nvram_unlock(tp);
11585
11586                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11587                         tg3_phy_reset(tp);
11588
11589                 if (tg3_test_registers(tp) != 0) {
11590                         etest->flags |= ETH_TEST_FL_FAILED;
11591                         data[2] = 1;
11592                 }
11593                 if (tg3_test_memory(tp) != 0) {
11594                         etest->flags |= ETH_TEST_FL_FAILED;
11595                         data[3] = 1;
11596                 }
11597                 data[4] = tg3_test_loopback(tp);
11598                 if (data[4])
                              etest->flags |= ETH_TEST_FL_FAILED;
11599
11600                 tg3_full_unlock(tp);
11601
11602                 if (tg3_test_interrupt(tp) != 0) {
11603                         etest->flags |= ETH_TEST_FL_FAILED;
11604                         data[5] = 1;
11605                 }
11606
11607                 tg3_full_lock(tp, 0);
11608
11609                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11610                 if (netif_running(dev)) {
11611                         tg3_flag_set(tp, INIT_COMPLETE);
11612                         err2 = tg3_restart_hw(tp, 1);
11613                         if (!err2)
11614                                 tg3_netif_start(tp);
11615                 }
11616
11617                 tg3_full_unlock(tp);
11618
11619                 if (irq_sync && !err2)
11620                         tg3_phy_start(tp);
11621         }
11622         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11623                 tg3_power_down(tp);
11625 }
11626
11627 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11628 {
11629         struct mii_ioctl_data *data = if_mii(ifr);
11630         struct tg3 *tp = netdev_priv(dev);
11631         int err;
11632
11633         if (tg3_flag(tp, USE_PHYLIB)) {
11634                 struct phy_device *phydev;
11635                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11636                         return -EAGAIN;
11637                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11638                 return phy_mii_ioctl(phydev, ifr, cmd);
11639         }
11640
11641         switch (cmd) {
11642         case SIOCGMIIPHY:
11643                 data->phy_id = tp->phy_addr;
11644
11645                 /* fallthru */
11646         case SIOCGMIIREG: {
11647                 u32 mii_regval;
11648
11649                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11650                         break;                  /* We have no PHY */
11651
11652                 if (!netif_running(dev))
11653                         return -EAGAIN;
11654
11655                 spin_lock_bh(&tp->lock);
11656                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11657                 spin_unlock_bh(&tp->lock);
11658
11659                 data->val_out = mii_regval;
11660
11661                 return err;
11662         }
11663
11664         case SIOCSMIIREG:
11665                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11666                         break;                  /* We have no PHY */
11667
11668                 if (!netif_running(dev))
11669                         return -EAGAIN;
11670
11671                 spin_lock_bh(&tp->lock);
11672                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11673                 spin_unlock_bh(&tp->lock);
11674
11675                 return err;
11676
11677         default:
11678                 /* do nothing */
11679                 break;
11680         }
11681         return -EOPNOTSUPP;
11682 }
11683
11684 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11685 {
11686         struct tg3 *tp = netdev_priv(dev);
11687
11688         memcpy(ec, &tp->coal, sizeof(*ec));
11689         return 0;
11690 }
11691
11692 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11693 {
11694         struct tg3 *tp = netdev_priv(dev);
11695         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11696         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11697
11698         if (!tg3_flag(tp, 5705_PLUS)) {
11699                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11700                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11701                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11702                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11703         }
11704
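      /* On 5705 and newer parts the limits above stay zero, so any
       * nonzero irq coalescing or stats tick request is rejected below.
       */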
11705         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11706             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11707             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11708             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11709             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11710             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11711             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11712             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11713             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11714             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11715                 return -EINVAL;
11716
11717         /* No rx interrupts will be generated if both are zero */
11718         if ((ec->rx_coalesce_usecs == 0) &&
11719             (ec->rx_max_coalesced_frames == 0))
11720                 return -EINVAL;
11721
11722         /* No tx interrupts will be generated if both are zero */
11723         if ((ec->tx_coalesce_usecs == 0) &&
11724             (ec->tx_max_coalesced_frames == 0))
11725                 return -EINVAL;
11726
11727         /* Only copy relevant parameters, ignore all others. */
11728         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11729         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11730         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11731         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11732         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11733         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11734         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11735         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11736         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11737
11738         if (netif_running(dev)) {
11739                 tg3_full_lock(tp, 0);
11740                 __tg3_set_coalesce(tp, &tp->coal);
11741                 tg3_full_unlock(tp);
11742         }
11743         return 0;
11744 }
11745
11746 static const struct ethtool_ops tg3_ethtool_ops = {
11747         .get_settings           = tg3_get_settings,
11748         .set_settings           = tg3_set_settings,
11749         .get_drvinfo            = tg3_get_drvinfo,
11750         .get_regs_len           = tg3_get_regs_len,
11751         .get_regs               = tg3_get_regs,
11752         .get_wol                = tg3_get_wol,
11753         .set_wol                = tg3_set_wol,
11754         .get_msglevel           = tg3_get_msglevel,
11755         .set_msglevel           = tg3_set_msglevel,
11756         .nway_reset             = tg3_nway_reset,
11757         .get_link               = ethtool_op_get_link,
11758         .get_eeprom_len         = tg3_get_eeprom_len,
11759         .get_eeprom             = tg3_get_eeprom,
11760         .set_eeprom             = tg3_set_eeprom,
11761         .get_ringparam          = tg3_get_ringparam,
11762         .set_ringparam          = tg3_set_ringparam,
11763         .get_pauseparam         = tg3_get_pauseparam,
11764         .set_pauseparam         = tg3_set_pauseparam,
11765         .self_test              = tg3_self_test,
11766         .get_strings            = tg3_get_strings,
11767         .set_phys_id            = tg3_set_phys_id,
11768         .get_ethtool_stats      = tg3_get_ethtool_stats,
11769         .get_coalesce           = tg3_get_coalesce,
11770         .set_coalesce           = tg3_set_coalesce,
11771         .get_sset_count         = tg3_get_sset_count,
11772 };
11773
11774 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11775 {
11776         u32 cursize, val, magic;
11777
11778         tp->nvram_size = EEPROM_CHIP_SIZE;
11779
11780         if (tg3_nvram_read(tp, 0, &magic) != 0)
11781                 return;
11782
11783         if ((magic != TG3_EEPROM_MAGIC) &&
11784             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11785             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11786                 return;
11787
11788         /*
11789          * Size the chip by reading offsets at increasing powers of two.
11790          * When we encounter our validation signature, we know the addressing
11791          * has wrapped around, and thus have our chip size.
11792          */
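      /* For example, on a 64 KB part the read at offset 0x10000 wraps
       * back to offset 0 and returns the magic word, ending the search.
       */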
11793         cursize = 0x10;
11794
11795         while (cursize < tp->nvram_size) {
11796                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11797                         return;
11798
11799                 if (val == magic)
11800                         break;
11801
11802                 cursize <<= 1;
11803         }
11804
11805         tp->nvram_size = cursize;
11806 }
11807
11808 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11809 {
11810         u32 val;
11811
11812         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11813                 return;
11814
11815         /* Selfboot format */
11816         if (val != TG3_EEPROM_MAGIC) {
11817                 tg3_get_eeprom_size(tp);
11818                 return;
11819         }
11820
11821         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11822                 if (val != 0) {
11823                         /* This is confusing.  We want to operate on the
11824                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11825                          * call will read from NVRAM and byteswap the data
11826                          * according to the byteswapping settings for all
11827                          * other register accesses.  This ensures the data we
11828                          * want will always reside in the lower 16-bits.
11829                          * However, the data in NVRAM is in LE format, which
11830                          * means the data from the NVRAM read will always be
11831                          * opposite the endianness of the CPU.  The 16-bit
11832                          * byteswap then brings the data to CPU endianness.
11833                          */
11834                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11835                         return;
11836                 }
11837         }
11838         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11839 }
11840
11841 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11842 {
11843         u32 nvcfg1;
11844
11845         nvcfg1 = tr32(NVRAM_CFG1);
11846         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11847                 tg3_flag_set(tp, FLASH);
11848         } else {
11849                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11850                 tw32(NVRAM_CFG1, nvcfg1);
11851         }
11852
11853         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11854             tg3_flag(tp, 5780_CLASS)) {
11855                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11856                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11857                         tp->nvram_jedecnum = JEDEC_ATMEL;
11858                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11859                         tg3_flag_set(tp, NVRAM_BUFFERED);
11860                         break;
11861                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11862                         tp->nvram_jedecnum = JEDEC_ATMEL;
11863                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11864                         break;
11865                 case FLASH_VENDOR_ATMEL_EEPROM:
11866                         tp->nvram_jedecnum = JEDEC_ATMEL;
11867                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11868                         tg3_flag_set(tp, NVRAM_BUFFERED);
11869                         break;
11870                 case FLASH_VENDOR_ST:
11871                         tp->nvram_jedecnum = JEDEC_ST;
11872                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11873                         tg3_flag_set(tp, NVRAM_BUFFERED);
11874                         break;
11875                 case FLASH_VENDOR_SAIFUN:
11876                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11877                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11878                         break;
11879                 case FLASH_VENDOR_SST_SMALL:
11880                 case FLASH_VENDOR_SST_LARGE:
11881                         tp->nvram_jedecnum = JEDEC_SST;
11882                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11883                         break;
11884                 }
11885         } else {
11886                 tp->nvram_jedecnum = JEDEC_ATMEL;
11887                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11888                 tg3_flag_set(tp, NVRAM_BUFFERED);
11889         }
11890 }
11891
11892 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11893 {
11894         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11895         case FLASH_5752PAGE_SIZE_256:
11896                 tp->nvram_pagesize = 256;
11897                 break;
11898         case FLASH_5752PAGE_SIZE_512:
11899                 tp->nvram_pagesize = 512;
11900                 break;
11901         case FLASH_5752PAGE_SIZE_1K:
11902                 tp->nvram_pagesize = 1024;
11903                 break;
11904         case FLASH_5752PAGE_SIZE_2K:
11905                 tp->nvram_pagesize = 2048;
11906                 break;
11907         case FLASH_5752PAGE_SIZE_4K:
11908                 tp->nvram_pagesize = 4096;
11909                 break;
11910         case FLASH_5752PAGE_SIZE_264:
11911                 tp->nvram_pagesize = 264;
11912                 break;
11913         case FLASH_5752PAGE_SIZE_528:
11914                 tp->nvram_pagesize = 528;
11915                 break;
11916         }
11917 }
11918
11919 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11920 {
11921         u32 nvcfg1;
11922
11923         nvcfg1 = tr32(NVRAM_CFG1);
11924
11925         /* NVRAM protection for TPM */
11926         if (nvcfg1 & (1 << 27))
11927                 tg3_flag_set(tp, PROTECTED_NVRAM);
11928
11929         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11930         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11931         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11932                 tp->nvram_jedecnum = JEDEC_ATMEL;
11933                 tg3_flag_set(tp, NVRAM_BUFFERED);
11934                 break;
11935         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11936                 tp->nvram_jedecnum = JEDEC_ATMEL;
11937                 tg3_flag_set(tp, NVRAM_BUFFERED);
11938                 tg3_flag_set(tp, FLASH);
11939                 break;
11940         case FLASH_5752VENDOR_ST_M45PE10:
11941         case FLASH_5752VENDOR_ST_M45PE20:
11942         case FLASH_5752VENDOR_ST_M45PE40:
11943                 tp->nvram_jedecnum = JEDEC_ST;
11944                 tg3_flag_set(tp, NVRAM_BUFFERED);
11945                 tg3_flag_set(tp, FLASH);
11946                 break;
11947         }
11948
11949         if (tg3_flag(tp, FLASH)) {
11950                 tg3_nvram_get_pagesize(tp, nvcfg1);
11951         } else {
11952                 /* For eeprom, set pagesize to maximum eeprom size */
11953                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11954
11955                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11956                 tw32(NVRAM_CFG1, nvcfg1);
11957         }
11958 }
11959
11960 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11961 {
11962         u32 nvcfg1, protect = 0;
11963
11964         nvcfg1 = tr32(NVRAM_CFG1);
11965
11966         /* NVRAM protection for TPM */
11967         if (nvcfg1 & (1 << 27)) {
11968                 tg3_flag_set(tp, PROTECTED_NVRAM);
11969                 protect = 1;
11970         }
11971
11972         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11973         switch (nvcfg1) {
11974         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11975         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11976         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11977         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11978                 tp->nvram_jedecnum = JEDEC_ATMEL;
11979                 tg3_flag_set(tp, NVRAM_BUFFERED);
11980                 tg3_flag_set(tp, FLASH);
11981                 tp->nvram_pagesize = 264;
11982                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11983                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11984                         tp->nvram_size = (protect ? 0x3e200 :
11985                                           TG3_NVRAM_SIZE_512KB);
11986                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11987                         tp->nvram_size = (protect ? 0x1f200 :
11988                                           TG3_NVRAM_SIZE_256KB);
11989                 else
11990                         tp->nvram_size = (protect ? 0x1f200 :
11991                                           TG3_NVRAM_SIZE_128KB);
11992                 break;
11993         case FLASH_5752VENDOR_ST_M45PE10:
11994         case FLASH_5752VENDOR_ST_M45PE20:
11995         case FLASH_5752VENDOR_ST_M45PE40:
11996                 tp->nvram_jedecnum = JEDEC_ST;
11997                 tg3_flag_set(tp, NVRAM_BUFFERED);
11998                 tg3_flag_set(tp, FLASH);
11999                 tp->nvram_pagesize = 256;
12000                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12001                         tp->nvram_size = (protect ?
12002                                           TG3_NVRAM_SIZE_64KB :
12003                                           TG3_NVRAM_SIZE_128KB);
12004                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12005                         tp->nvram_size = (protect ?
12006                                           TG3_NVRAM_SIZE_64KB :
12007                                           TG3_NVRAM_SIZE_256KB);
12008                 else
12009                         tp->nvram_size = (protect ?
12010                                           TG3_NVRAM_SIZE_128KB :
12011                                           TG3_NVRAM_SIZE_512KB);
12012                 break;
12013         }
12014 }
12015
12016 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12017 {
12018         u32 nvcfg1;
12019
12020         nvcfg1 = tr32(NVRAM_CFG1);
12021
12022         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12023         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12024         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12025         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12026         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12027                 tp->nvram_jedecnum = JEDEC_ATMEL;
12028                 tg3_flag_set(tp, NVRAM_BUFFERED);
12029                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12030
12031                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12032                 tw32(NVRAM_CFG1, nvcfg1);
12033                 break;
12034         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12035         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12036         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12037         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12038                 tp->nvram_jedecnum = JEDEC_ATMEL;
12039                 tg3_flag_set(tp, NVRAM_BUFFERED);
12040                 tg3_flag_set(tp, FLASH);
12041                 tp->nvram_pagesize = 264;
12042                 break;
12043         case FLASH_5752VENDOR_ST_M45PE10:
12044         case FLASH_5752VENDOR_ST_M45PE20:
12045         case FLASH_5752VENDOR_ST_M45PE40:
12046                 tp->nvram_jedecnum = JEDEC_ST;
12047                 tg3_flag_set(tp, NVRAM_BUFFERED);
12048                 tg3_flag_set(tp, FLASH);
12049                 tp->nvram_pagesize = 256;
12050                 break;
12051         }
12052 }
12053
12054 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12055 {
12056         u32 nvcfg1, protect = 0;
12057
12058         nvcfg1 = tr32(NVRAM_CFG1);
12059
12060         /* NVRAM protection for TPM */
12061         if (nvcfg1 & (1 << 27)) {
12062                 tg3_flag_set(tp, PROTECTED_NVRAM);
12063                 protect = 1;
12064         }
12065
12066         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12067         switch (nvcfg1) {
12068         case FLASH_5761VENDOR_ATMEL_ADB021D:
12069         case FLASH_5761VENDOR_ATMEL_ADB041D:
12070         case FLASH_5761VENDOR_ATMEL_ADB081D:
12071         case FLASH_5761VENDOR_ATMEL_ADB161D:
12072         case FLASH_5761VENDOR_ATMEL_MDB021D:
12073         case FLASH_5761VENDOR_ATMEL_MDB041D:
12074         case FLASH_5761VENDOR_ATMEL_MDB081D:
12075         case FLASH_5761VENDOR_ATMEL_MDB161D:
12076                 tp->nvram_jedecnum = JEDEC_ATMEL;
12077                 tg3_flag_set(tp, NVRAM_BUFFERED);
12078                 tg3_flag_set(tp, FLASH);
12079                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12080                 tp->nvram_pagesize = 256;
12081                 break;
12082         case FLASH_5761VENDOR_ST_A_M45PE20:
12083         case FLASH_5761VENDOR_ST_A_M45PE40:
12084         case FLASH_5761VENDOR_ST_A_M45PE80:
12085         case FLASH_5761VENDOR_ST_A_M45PE16:
12086         case FLASH_5761VENDOR_ST_M_M45PE20:
12087         case FLASH_5761VENDOR_ST_M_M45PE40:
12088         case FLASH_5761VENDOR_ST_M_M45PE80:
12089         case FLASH_5761VENDOR_ST_M_M45PE16:
12090                 tp->nvram_jedecnum = JEDEC_ST;
12091                 tg3_flag_set(tp, NVRAM_BUFFERED);
12092                 tg3_flag_set(tp, FLASH);
12093                 tp->nvram_pagesize = 256;
12094                 break;
12095         }
12096
12097         if (protect) {
12098                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12099         } else {
12100                 switch (nvcfg1) {
12101                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12102                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12103                 case FLASH_5761VENDOR_ST_A_M45PE16:
12104                 case FLASH_5761VENDOR_ST_M_M45PE16:
12105                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12106                         break;
12107                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12108                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12109                 case FLASH_5761VENDOR_ST_A_M45PE80:
12110                 case FLASH_5761VENDOR_ST_M_M45PE80:
12111                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12112                         break;
12113                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12114                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12115                 case FLASH_5761VENDOR_ST_A_M45PE40:
12116                 case FLASH_5761VENDOR_ST_M_M45PE40:
12117                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12118                         break;
12119                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12120                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12121                 case FLASH_5761VENDOR_ST_A_M45PE20:
12122                 case FLASH_5761VENDOR_ST_M_M45PE20:
12123                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12124                         break;
12125                 }
12126         }
12127 }
12128
12129 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12130 {
12131         tp->nvram_jedecnum = JEDEC_ATMEL;
12132         tg3_flag_set(tp, NVRAM_BUFFERED);
12133         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12134 }
12135
12136 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12137 {
12138         u32 nvcfg1;
12139
12140         nvcfg1 = tr32(NVRAM_CFG1);
12141
12142         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12143         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12144         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12145                 tp->nvram_jedecnum = JEDEC_ATMEL;
12146                 tg3_flag_set(tp, NVRAM_BUFFERED);
12147                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12148
12149                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12150                 tw32(NVRAM_CFG1, nvcfg1);
12151                 return;
12152         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12153         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12154         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12155         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12156         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12157         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12158         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12159                 tp->nvram_jedecnum = JEDEC_ATMEL;
12160                 tg3_flag_set(tp, NVRAM_BUFFERED);
12161                 tg3_flag_set(tp, FLASH);
12162
12163                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12164                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12165                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12166                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12167                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12168                         break;
12169                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12170                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12171                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12172                         break;
12173                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12174                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12175                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12176                         break;
12177                 }
12178                 break;
12179         case FLASH_5752VENDOR_ST_M45PE10:
12180         case FLASH_5752VENDOR_ST_M45PE20:
12181         case FLASH_5752VENDOR_ST_M45PE40:
12182                 tp->nvram_jedecnum = JEDEC_ST;
12183                 tg3_flag_set(tp, NVRAM_BUFFERED);
12184                 tg3_flag_set(tp, FLASH);
12185
12186                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12187                 case FLASH_5752VENDOR_ST_M45PE10:
12188                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12189                         break;
12190                 case FLASH_5752VENDOR_ST_M45PE20:
12191                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12192                         break;
12193                 case FLASH_5752VENDOR_ST_M45PE40:
12194                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12195                         break;
12196                 }
12197                 break;
12198         default:
12199                 tg3_flag_set(tp, NO_NVRAM);
12200                 return;
12201         }
12202
12203         tg3_nvram_get_pagesize(tp, nvcfg1);
12204         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12205                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12206 }
12207
12209 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12210 {
12211         u32 nvcfg1;
12212
12213         nvcfg1 = tr32(NVRAM_CFG1);
12214
12215         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12216         case FLASH_5717VENDOR_ATMEL_EEPROM:
12217         case FLASH_5717VENDOR_MICRO_EEPROM:
12218                 tp->nvram_jedecnum = JEDEC_ATMEL;
12219                 tg3_flag_set(tp, NVRAM_BUFFERED);
12220                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12221
12222                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12223                 tw32(NVRAM_CFG1, nvcfg1);
12224                 return;
12225         case FLASH_5717VENDOR_ATMEL_MDB011D:
12226         case FLASH_5717VENDOR_ATMEL_ADB011B:
12227         case FLASH_5717VENDOR_ATMEL_ADB011D:
12228         case FLASH_5717VENDOR_ATMEL_MDB021D:
12229         case FLASH_5717VENDOR_ATMEL_ADB021B:
12230         case FLASH_5717VENDOR_ATMEL_ADB021D:
12231         case FLASH_5717VENDOR_ATMEL_45USPT:
12232                 tp->nvram_jedecnum = JEDEC_ATMEL;
12233                 tg3_flag_set(tp, NVRAM_BUFFERED);
12234                 tg3_flag_set(tp, FLASH);
12235
12236                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12237                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12238                         /* Detect size with tg3_get_nvram_size() */
12239                         break;
12240                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12241                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12242                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12243                         break;
12244                 default:
12245                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12246                         break;
12247                 }
12248                 break;
12249         case FLASH_5717VENDOR_ST_M_M25PE10:
12250         case FLASH_5717VENDOR_ST_A_M25PE10:
12251         case FLASH_5717VENDOR_ST_M_M45PE10:
12252         case FLASH_5717VENDOR_ST_A_M45PE10:
12253         case FLASH_5717VENDOR_ST_M_M25PE20:
12254         case FLASH_5717VENDOR_ST_A_M25PE20:
12255         case FLASH_5717VENDOR_ST_M_M45PE20:
12256         case FLASH_5717VENDOR_ST_A_M45PE20:
12257         case FLASH_5717VENDOR_ST_25USPT:
12258         case FLASH_5717VENDOR_ST_45USPT:
12259                 tp->nvram_jedecnum = JEDEC_ST;
12260                 tg3_flag_set(tp, NVRAM_BUFFERED);
12261                 tg3_flag_set(tp, FLASH);
12262
12263                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12264                 case FLASH_5717VENDOR_ST_M_M25PE20:
12265                 case FLASH_5717VENDOR_ST_M_M45PE20:
12266                         /* Detect size with tg3_get_nvram_size() */
12267                         break;
12268                 case FLASH_5717VENDOR_ST_A_M25PE20:
12269                 case FLASH_5717VENDOR_ST_A_M45PE20:
12270                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12271                         break;
12272                 default:
12273                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12274                         break;
12275                 }
12276                 break;
12277         default:
12278                 tg3_flag_set(tp, NO_NVRAM);
12279                 return;
12280         }
12281
12282         tg3_nvram_get_pagesize(tp, nvcfg1);
12283         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12284                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12285 }
12286
12287 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12288 {
12289         u32 nvcfg1, nvmpinstrp;
12290
12291         nvcfg1 = tr32(NVRAM_CFG1);
12292         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12293
12294         switch (nvmpinstrp) {
12295         case FLASH_5720_EEPROM_HD:
12296         case FLASH_5720_EEPROM_LD:
12297                 tp->nvram_jedecnum = JEDEC_ATMEL;
12298                 tg3_flag_set(tp, NVRAM_BUFFERED);
12299
12300                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12301                 tw32(NVRAM_CFG1, nvcfg1);
12302                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12303                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12304                 else
12305                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12306                 return;
12307         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12308         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12309         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12310         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12311         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12312         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12313         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12314         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12315         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12316         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12317         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12318         case FLASH_5720VENDOR_ATMEL_45USPT:
12319                 tp->nvram_jedecnum = JEDEC_ATMEL;
12320                 tg3_flag_set(tp, NVRAM_BUFFERED);
12321                 tg3_flag_set(tp, FLASH);
12322
12323                 switch (nvmpinstrp) {
12324                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12325                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12326                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12327                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12328                         break;
12329                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12330                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12331                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12332                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12333                         break;
12334                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12335                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12336                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12337                         break;
12338                 default:
12339                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12340                         break;
12341                 }
12342                 break;
12343         case FLASH_5720VENDOR_M_ST_M25PE10:
12344         case FLASH_5720VENDOR_M_ST_M45PE10:
12345         case FLASH_5720VENDOR_A_ST_M25PE10:
12346         case FLASH_5720VENDOR_A_ST_M45PE10:
12347         case FLASH_5720VENDOR_M_ST_M25PE20:
12348         case FLASH_5720VENDOR_M_ST_M45PE20:
12349         case FLASH_5720VENDOR_A_ST_M25PE20:
12350         case FLASH_5720VENDOR_A_ST_M45PE20:
12351         case FLASH_5720VENDOR_M_ST_M25PE40:
12352         case FLASH_5720VENDOR_M_ST_M45PE40:
12353         case FLASH_5720VENDOR_A_ST_M25PE40:
12354         case FLASH_5720VENDOR_A_ST_M45PE40:
12355         case FLASH_5720VENDOR_M_ST_M25PE80:
12356         case FLASH_5720VENDOR_M_ST_M45PE80:
12357         case FLASH_5720VENDOR_A_ST_M25PE80:
12358         case FLASH_5720VENDOR_A_ST_M45PE80:
12359         case FLASH_5720VENDOR_ST_25USPT:
12360         case FLASH_5720VENDOR_ST_45USPT:
12361                 tp->nvram_jedecnum = JEDEC_ST;
12362                 tg3_flag_set(tp, NVRAM_BUFFERED);
12363                 tg3_flag_set(tp, FLASH);
12364
12365                 switch (nvmpinstrp) {
12366                 case FLASH_5720VENDOR_M_ST_M25PE20:
12367                 case FLASH_5720VENDOR_M_ST_M45PE20:
12368                 case FLASH_5720VENDOR_A_ST_M25PE20:
12369                 case FLASH_5720VENDOR_A_ST_M45PE20:
12370                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12371                         break;
12372                 case FLASH_5720VENDOR_M_ST_M25PE40:
12373                 case FLASH_5720VENDOR_M_ST_M45PE40:
12374                 case FLASH_5720VENDOR_A_ST_M25PE40:
12375                 case FLASH_5720VENDOR_A_ST_M45PE40:
12376                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12377                         break;
12378                 case FLASH_5720VENDOR_M_ST_M25PE80:
12379                 case FLASH_5720VENDOR_M_ST_M45PE80:
12380                 case FLASH_5720VENDOR_A_ST_M25PE80:
12381                 case FLASH_5720VENDOR_A_ST_M45PE80:
12382                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12383                         break;
12384                 default:
12385                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12386                         break;
12387                 }
12388                 break;
12389         default:
12390                 tg3_flag_set(tp, NO_NVRAM);
12391                 return;
12392         }
12393
12394         tg3_nvram_get_pagesize(tp, nvcfg1);
12395         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12396                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12397 }
12398
12399 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12400 static void __devinit tg3_nvram_init(struct tg3 *tp)
12401 {
12402         tw32_f(GRC_EEPROM_ADDR,
12403              (EEPROM_ADDR_FSM_RESET |
12404               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12405                EEPROM_ADDR_CLKPERD_SHIFT)));
12406
12407         msleep(1);
12408
12409         /* Enable seeprom accesses. */
12410         tw32_f(GRC_LOCAL_CTRL,
12411              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12412         udelay(100);
12413
12414         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12415             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12416                 tg3_flag_set(tp, NVRAM);
12417
12418                 if (tg3_nvram_lock(tp)) {
12419                         netdev_warn(tp->dev,
12420                                     "Cannot get nvram lock, %s failed\n",
12421                                     __func__);
12422                         return;
12423                 }
12424                 tg3_enable_nvram_access(tp);
12425
12426                 tp->nvram_size = 0;
12427
12428                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12429                         tg3_get_5752_nvram_info(tp);
12430                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12431                         tg3_get_5755_nvram_info(tp);
12432                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12433                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12434                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12435                         tg3_get_5787_nvram_info(tp);
12436                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12437                         tg3_get_5761_nvram_info(tp);
12438                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12439                         tg3_get_5906_nvram_info(tp);
12440                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12441                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12442                         tg3_get_57780_nvram_info(tp);
12443                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12444                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12445                         tg3_get_5717_nvram_info(tp);
12446                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12447                         tg3_get_5720_nvram_info(tp);
12448                 else
12449                         tg3_get_nvram_info(tp);
12450
12451                 if (tp->nvram_size == 0)
12452                         tg3_get_nvram_size(tp);
12453
12454                 tg3_disable_nvram_access(tp);
12455                 tg3_nvram_unlock(tp);
12456
12457         } else {
12458                 tg3_flag_clear(tp, NVRAM);
12459                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12460
12461                 tg3_get_eeprom_size(tp);
12462         }
12463 }
12464
12465 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12466                                     u32 offset, u32 len, u8 *buf)
12467 {
12468         int i, j, rc = 0;
12469         u32 val;
12470
12471         for (i = 0; i < len; i += 4) {
12472                 u32 addr;
12473                 __be32 data;
12474
12475                 addr = offset + i;
12476
12477                 memcpy(&data, buf + i, 4);
12478
12479                 /*
12480                  * The SEEPROM interface expects the data to always be in the
12481                  * opposite of the native endian format.  We accomplish this
12482                  * by reversing all the operations that would have been
12483                  * performed on the data from a call to tg3_nvram_read_be32().
12484                  */
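                /* Worked example (host-independent): buf bytes
                 * 0x11 0x22 0x33 0x44 load as big-endian data, so
                 * be32_to_cpu(data) == 0x11223344 on any host and
                 * swab32() then yields 0x44332211 for the register.
                 */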
12485                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12486
12487                 val = tr32(GRC_EEPROM_ADDR);
12488                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12489
12490                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12491                         EEPROM_ADDR_READ);
12492                 tw32(GRC_EEPROM_ADDR, val |
12493                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12494                         (addr & EEPROM_ADDR_ADDR_MASK) |
12495                         EEPROM_ADDR_START |
12496                         EEPROM_ADDR_WRITE);
12497
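                /* Poll for up to ~1 second (1000 x 1 ms) for the
                 * controller to signal write completion.
                 */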
12498                 for (j = 0; j < 1000; j++) {
12499                         val = tr32(GRC_EEPROM_ADDR);
12500
12501                         if (val & EEPROM_ADDR_COMPLETE)
12502                                 break;
12503                         msleep(1);
12504                 }
12505                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12506                         rc = -EBUSY;
12507                         break;
12508                 }
12509         }
12510
12511         return rc;
12512 }
12513
12514 /* offset and length are dword aligned */
12515 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12516                 u8 *buf)
12517 {
12518         int ret = 0;
12519         u32 pagesize = tp->nvram_pagesize;
12520         u32 pagemask = pagesize - 1;
12521         u32 nvram_cmd;
12522         u8 *tmp;
12523
12524         tmp = kmalloc(pagesize, GFP_KERNEL);
12525         if (tmp == NULL)
12526                 return -ENOMEM;
12527
12528         while (len) {
12529                 int j;
12530                 u32 phy_addr, page_off, size;
12531
12532                 phy_addr = offset & ~pagemask;
12533
12534                 for (j = 0; j < pagesize; j += 4) {
12535                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12536                                                   (__be32 *) (tmp + j));
12537                         if (ret)
12538                                 break;
12539                 }
12540                 if (ret)
12541                         break;
12542
12543                 page_off = offset & pagemask;
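                /* Example of the page math, assuming a 256-byte page:
                 * offset 0x1234 gives phy_addr 0x1200 (page base) and
                 * page_off 0x34 (position within the page).
                 */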
12544                 size = pagesize;
12545                 if (len < size)
12546                         size = len;
12547
12548                 len -= size;
12549
12550                 memcpy(tmp + page_off, buf, size);
12551
12552                 offset = offset + (pagesize - page_off);
12553
12554                 tg3_enable_nvram_access(tp);
12555
12556                 /*
12557                  * Before we can erase the flash page, we need
12558                  * to issue a special "write enable" command.
12559                  */
12560                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12561
12562                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12563                         break;
12564
12565                 /* Erase the target page */
12566                 tw32(NVRAM_ADDR, phy_addr);
12567
12568                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12569                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12570
12571                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12572                         break;
12573
12574                 /* Issue another write enable to start the write. */
12575                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12576
12577                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12578                         break;
12579
12580                 for (j = 0; j < pagesize; j += 4) {
12581                         __be32 data;
12582
12583                         data = *((__be32 *) (tmp + j));
12584
12585                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12586
12587                         tw32(NVRAM_ADDR, phy_addr + j);
12588
12589                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12590                                 NVRAM_CMD_WR;
12591
12592                         if (j == 0)
12593                                 nvram_cmd |= NVRAM_CMD_FIRST;
12594                         else if (j == (pagesize - 4))
12595                                 nvram_cmd |= NVRAM_CMD_LAST;
12596
12597                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12598                                 break;
12599                 }
12600                 if (ret)
12601                         break;
12602         }
12603
12604         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12605         tg3_nvram_exec_cmd(tp, nvram_cmd);
12606
12607         kfree(tmp);
12608
12609         return ret;
12610 }
12611
12612 /* offset and length are dword aligned */
12613 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12614                 u8 *buf)
12615 {
12616         int i, ret = 0;
12617
12618         for (i = 0; i < len; i += 4, offset += 4) {
12619                 u32 page_off, phy_addr, nvram_cmd;
12620                 __be32 data;
12621
12622                 memcpy(&data, buf + i, 4);
12623                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12624
12625                 page_off = offset % tp->nvram_pagesize;
12626
12627                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12628
12629                 tw32(NVRAM_ADDR, phy_addr);
12630
12631                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12632
12633                 if (page_off == 0 || i == 0)
12634                         nvram_cmd |= NVRAM_CMD_FIRST;
12635                 if (page_off == (tp->nvram_pagesize - 4))
12636                         nvram_cmd |= NVRAM_CMD_LAST;
12637
12638                 if (i == (len - 4))
12639                         nvram_cmd |= NVRAM_CMD_LAST;
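                /* Example, assuming a 264-byte page: an 8-byte write at
                 * offset 260 issues dword 1 with FIRST|LAST (i == 0 and
                 * page_off == pagesize - 4) and dword 2, which starts
                 * the next page, with FIRST (page_off == 0) and LAST
                 * (i == len - 4).
                 */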
12640
12641                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12642                     !tg3_flag(tp, 5755_PLUS) &&
12643                     (tp->nvram_jedecnum == JEDEC_ST) &&
12644                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12645
12646                         if ((ret = tg3_nvram_exec_cmd(tp,
12647                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12648                                 NVRAM_CMD_DONE)))
12649
12650                                 break;
12651                 }
12652                 if (!tg3_flag(tp, FLASH)) {
12653                         /* We always do complete word writes to eeprom. */
12654                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12655                 }
12656
12657                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12658                         break;
12659         }
12660         return ret;
12661 }
12662
12663 /* offset and length are dword aligned */
12664 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12665 {
12666         int ret;
12667
12668         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12669                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12670                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12671                 udelay(40);
12672         }
12673
12674         if (!tg3_flag(tp, NVRAM)) {
12675                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12676         } else {
12677                 u32 grc_mode;
12678
12679                 ret = tg3_nvram_lock(tp);
12680                 if (ret)
12681                         return ret;
12682
12683                 tg3_enable_nvram_access(tp);
12684                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12685                         tw32(NVRAM_WRITE1, 0x406);
12686
12687                 grc_mode = tr32(GRC_MODE);
12688                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12689
12690                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12691                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12692                                 buf);
12693                 } else {
12694                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12695                                 buf);
12696                 }
12697
12698                 grc_mode = tr32(GRC_MODE);
12699                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12700
12701                 tg3_disable_nvram_access(tp);
12702                 tg3_nvram_unlock(tp);
12703         }
12704
12705         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12706                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12707                 udelay(40);
12708         }
12709
12710         return ret;
12711 }
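/*
 * Hypothetical usage sketch, not part of the driver: writing a single
 * dword from a configuration path once tp has been fully initialized.
 *
 *      u8 buf[4] = { 0x00, 0x01, 0x02, 0x03 };
 *      int err = tg3_nvram_write_block(tp, 0x80, sizeof(buf), buf);
 *      if (err)
 *              netdev_err(tp->dev, "NVRAM write failed: %d\n", err);
 *
 * offset and len must be dword aligned; locking, access enables and
 * the write-protect GPIO are handled internally.
 */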
12712
12713 struct subsys_tbl_ent {
12714         u16 subsys_vendor, subsys_devid;
12715         u32 phy_id;
12716 };
12717
12718 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12719         /* Broadcom boards. */
12720         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12721           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12722         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12723           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12724         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12725           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12726         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12727           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12728         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12729           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12730         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12731           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12732         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12733           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12734         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12735           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12736         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12737           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12738         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12739           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12740         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12741           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12742
12743         /* 3com boards. */
12744         { TG3PCI_SUBVENDOR_ID_3COM,
12745           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12746         { TG3PCI_SUBVENDOR_ID_3COM,
12747           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12748         { TG3PCI_SUBVENDOR_ID_3COM,
12749           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12750         { TG3PCI_SUBVENDOR_ID_3COM,
12751           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12752         { TG3PCI_SUBVENDOR_ID_3COM,
12753           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12754
12755         /* DELL boards. */
12756         { TG3PCI_SUBVENDOR_ID_DELL,
12757           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12758         { TG3PCI_SUBVENDOR_ID_DELL,
12759           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12760         { TG3PCI_SUBVENDOR_ID_DELL,
12761           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12762         { TG3PCI_SUBVENDOR_ID_DELL,
12763           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12764
12765         /* Compaq boards. */
12766         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12767           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12768         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12769           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12770         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12771           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12772         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12773           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12774         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12775           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12776
12777         /* IBM boards. */
12778         { TG3PCI_SUBVENDOR_ID_IBM,
12779           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12780 };
12781
12782 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12783 {
12784         int i;
12785
12786         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12787                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12788                      tp->pdev->subsystem_vendor) &&
12789                     (subsys_id_to_phy_id[i].subsys_devid ==
12790                      tp->pdev->subsystem_device))
12791                         return &subsys_id_to_phy_id[i];
12792         }
12793         return NULL;
12794 }
12795
12796 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12797 {
12798         u32 val;
12799
12800         tp->phy_id = TG3_PHY_ID_INVALID;
12801         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12802
12803         /* Assume an onboard, WOL-capable device by default. */
12804         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12805         tg3_flag_set(tp, WOL_CAP);
12806
12807         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12808                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12809                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12810                         tg3_flag_set(tp, IS_NIC);
12811                 }
12812                 val = tr32(VCPU_CFGSHDW);
12813                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12814                         tg3_flag_set(tp, ASPM_WORKAROUND);
12815                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12816                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12817                         tg3_flag_set(tp, WOL_ENABLE);
12818                         device_set_wakeup_enable(&tp->pdev->dev, true);
12819                 }
12820                 goto done;
12821         }
12822
12823         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12824         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12825                 u32 nic_cfg, led_cfg;
12826                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12827                 int eeprom_phy_serdes = 0;
12828
12829                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12830                 tp->nic_sram_data_cfg = nic_cfg;
12831
12832                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12833                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12834                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12835                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12836                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12837                     (ver > 0) && (ver < 0x100))
12838                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12839
12840                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12841                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12842
12843                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12844                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12845                         eeprom_phy_serdes = 1;
12846
12847                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12848                 if (nic_phy_id != 0) {
12849                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12850                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12851
12852                         eeprom_phy_id  = (id1 >> 16) << 10;
12853                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12854                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12855                 } else
12856                         eeprom_phy_id = 0;
12857
12858                 tp->phy_id = eeprom_phy_id;
12859                 if (eeprom_phy_serdes) {
12860                         if (!tg3_flag(tp, 5705_PLUS))
12861                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12862                         else
12863                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12864                 }
12865
12866                 if (tg3_flag(tp, 5750_PLUS))
12867                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12868                                     SHASTA_EXT_LED_MODE_MASK);
12869                 else
12870                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12871
12872                 switch (led_cfg) {
12873                 default:
12874                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12875                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12876                         break;
12877
12878                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12879                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12880                         break;
12881
12882                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12883                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12884
12885                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12886                          * read on some older 5700/5701 bootcode.
12887                          */
12888                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12889                             ASIC_REV_5700 ||
12890                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12891                             ASIC_REV_5701)
12892                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12893
12894                         break;
12895
12896                 case SHASTA_EXT_LED_SHARED:
12897                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12898                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12899                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12900                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12901                                                  LED_CTRL_MODE_PHY_2);
12902                         break;
12903
12904                 case SHASTA_EXT_LED_MAC:
12905                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12906                         break;
12907
12908                 case SHASTA_EXT_LED_COMBO:
12909                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12910                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12911                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12912                                                  LED_CTRL_MODE_PHY_2);
12913                         break;
12914
12915                 }
12916
12917                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12918                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12919                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12920                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12921
12922                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12923                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12924
12925                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12926                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12927                         if ((tp->pdev->subsystem_vendor ==
12928                              PCI_VENDOR_ID_ARIMA) &&
12929                             (tp->pdev->subsystem_device == 0x205a ||
12930                              tp->pdev->subsystem_device == 0x2063))
12931                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12932                 } else {
12933                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12934                         tg3_flag_set(tp, IS_NIC);
12935                 }
12936
12937                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12938                         tg3_flag_set(tp, ENABLE_ASF);
12939                         if (tg3_flag(tp, 5750_PLUS))
12940                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12941                 }
12942
12943                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12944                     tg3_flag(tp, 5750_PLUS))
12945                         tg3_flag_set(tp, ENABLE_APE);
12946
12947                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12948                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12949                         tg3_flag_clear(tp, WOL_CAP);
12950
12951                 if (tg3_flag(tp, WOL_CAP) &&
12952                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12953                         tg3_flag_set(tp, WOL_ENABLE);
12954                         device_set_wakeup_enable(&tp->pdev->dev, true);
12955                 }
12956
12957                 if (cfg2 & (1 << 17))
12958                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12959
12960                 /* Serdes signal pre-emphasis in register 0x590 is
12961                  * set by the bootcode if bit 18 is set. */
12962                 if (cfg2 & (1 << 18))
12963                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12964
12965                 if ((tg3_flag(tp, 57765_PLUS) ||
12966                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12967                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12968                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12969                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12970
12971                 if (tg3_flag(tp, PCI_EXPRESS) &&
12972                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12973                     !tg3_flag(tp, 57765_PLUS)) {
12974                         u32 cfg3;
12975
12976                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12977                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12978                                 tg3_flag_set(tp, ASPM_WORKAROUND);
12979                 }
12980
12981                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12982                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12983                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12984                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12985                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12986                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12987         }
12988 done:
12989         if (tg3_flag(tp, WOL_CAP))
12990                 device_set_wakeup_enable(&tp->pdev->dev,
12991                                          tg3_flag(tp, WOL_ENABLE));
12992         else
12993                 device_set_wakeup_capable(&tp->pdev->dev, false);
12994 }
12995
12996 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12997 {
12998         int i;
12999         u32 val;
13000
13001         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13002         tw32(OTP_CTRL, cmd);
13003
13004         /* Wait for up to 1 ms for command to execute. */
13005         for (i = 0; i < 100; i++) {
13006                 val = tr32(OTP_STATUS);
13007                 if (val & OTP_STATUS_CMD_DONE)
13008                         break;
13009                 udelay(10);
13010         }
13011
13012         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13013 }
13014
13015 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13016  * configuration is a 32-bit value that straddles the alignment boundary.
13017  * We do two 32-bit reads and then shift and merge the results.
13018  */
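/* For example (hypothetical reads): a first read of 0xaaaa1111 and a
 * second of 0x2222bbbb merge to (0x1111 << 16) | 0x2222 = 0x11112222.
 */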
13019 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13020 {
13021         u32 bhalf_otp, thalf_otp;
13022
13023         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13024
13025         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13026                 return 0;
13027
13028         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13029
13030         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13031                 return 0;
13032
13033         thalf_otp = tr32(OTP_READ_DATA);
13034
13035         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13036
13037         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13038                 return 0;
13039
13040         bhalf_otp = tr32(OTP_READ_DATA);
13041
13042         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13043 }
13044
13045 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13046 {
13047         u32 adv = ADVERTISED_Autoneg |
13048                   ADVERTISED_Pause;
13049
13050         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13051                 adv |= ADVERTISED_1000baseT_Half |
13052                        ADVERTISED_1000baseT_Full;
13053
13054         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13055                 adv |= ADVERTISED_100baseT_Half |
13056                        ADVERTISED_100baseT_Full |
13057                        ADVERTISED_10baseT_Half |
13058                        ADVERTISED_10baseT_Full |
13059                        ADVERTISED_TP;
13060         else
13061                 adv |= ADVERTISED_FIBRE;
13062
13063         tp->link_config.advertising = adv;
13064         tp->link_config.speed = SPEED_INVALID;
13065         tp->link_config.duplex = DUPLEX_INVALID;
13066         tp->link_config.autoneg = AUTONEG_ENABLE;
13067         tp->link_config.active_speed = SPEED_INVALID;
13068         tp->link_config.active_duplex = DUPLEX_INVALID;
13069         tp->link_config.orig_speed = SPEED_INVALID;
13070         tp->link_config.orig_duplex = DUPLEX_INVALID;
13071         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13072 }
13073
13074 static int __devinit tg3_phy_probe(struct tg3 *tp)
13075 {
13076         u32 hw_phy_id_1, hw_phy_id_2;
13077         u32 hw_phy_id, hw_phy_id_masked;
13078         int err;
13079
13080         /* flow control autonegotiation is default behavior */
13081         tg3_flag_set(tp, PAUSE_AUTONEG);
13082         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13083
13084         if (tg3_flag(tp, USE_PHYLIB))
13085                 return tg3_phy_init(tp);
13086
13087         /* Reading the PHY ID register can conflict with ASF
13088          * firmware access to the PHY hardware.
13089          */
13090         err = 0;
13091         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13092                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13093         } else {
13094                 /* Now read the physical PHY_ID from the chip and verify
13095                  * that it is sane.  If it doesn't look good, we fall back
13096                  * first to the PHY_ID already found in the eeprom area,
13097                  * and failing that to the hard-coded subsystem-ID table.
13098                  */
13099                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13100                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13101
13102                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13103                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13104                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
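                /* Packing example with hypothetical register values:
                 * PHYSID1 = 0x0141 and PHYSID2 = 0x0c23 combine to
                 * (0x0141 << 10) | ((0x0c23 & 0xfc00) << 16) |
                 * (0x0c23 & 0x03ff) = 0x0c050623.
                 */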
13105
13106                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13107         }
13108
13109         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13110                 tp->phy_id = hw_phy_id;
13111                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13112                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13113                 else
13114                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13115         } else {
13116                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13117                         /* Do nothing, phy ID already set up in
13118                          * tg3_get_eeprom_hw_cfg().
13119                          */
13120                 } else {
13121                         struct subsys_tbl_ent *p;
13122
13123                         /* No eeprom signature?  Try the hardcoded
13124                          * subsys device table.
13125                          */
13126                         p = tg3_lookup_by_subsys(tp);
13127                         if (!p)
13128                                 return -ENODEV;
13129
13130                         tp->phy_id = p->phy_id;
13131                         if (!tp->phy_id ||
13132                             tp->phy_id == TG3_PHY_ID_BCM8002)
13133                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13134                 }
13135         }
13136
13137         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13138             ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13139               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13140              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13141               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13142                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13143
13144         tg3_phy_init_link_config(tp);
13145
13146         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13147             !tg3_flag(tp, ENABLE_APE) &&
13148             !tg3_flag(tp, ENABLE_ASF)) {
13149                 u32 bmsr, mask;
13150
13151                 tg3_readphy(tp, MII_BMSR, &bmsr);
13152                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13153                     (bmsr & BMSR_LSTATUS))
13154                         goto skip_phy_reset;
13155
13156                 err = tg3_phy_reset(tp);
13157                 if (err)
13158                         return err;
13159
13160                 tg3_phy_set_wirespeed(tp);
13161
13162                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13163                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13164                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13165                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13166                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13167                                             tp->link_config.flowctrl);
13168
13169                         tg3_writephy(tp, MII_BMCR,
13170                                      BMCR_ANENABLE | BMCR_ANRESTART);
13171                 }
13172         }
13173
13174 skip_phy_reset:
13175         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13176                 err = tg3_init_5401phy_dsp(tp);
13177                 if (err)
13178                         return err;
13179
13180                 err = tg3_init_5401phy_dsp(tp);
13181         }
13182
13183         return err;
13184 }
13185
13186 static void __devinit tg3_read_vpd(struct tg3 *tp)
13187 {
13188         u8 *vpd_data;
13189         unsigned int block_end, rosize, len;
13190         int j, i = 0;
13191
13192         vpd_data = (u8 *)tg3_vpd_readblock(tp);
13193         if (!vpd_data)
13194                 goto out_no_vpd;
13195
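        /* The read-only VPD section parsed below is, per the PCI spec,
         * a large-resource tag byte, a 16-bit little-endian length and
         * a series of records: 2-byte keyword ("PN", "V0", ...), 1-byte
         * field length, then the field data.
         */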
13196         i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13197                              PCI_VPD_LRDT_RO_DATA);
13198         if (i < 0)
13199                 goto out_not_found;
13200
13201         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13202         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13203         i += PCI_VPD_LRDT_TAG_SIZE;
13204
13205         if (block_end > TG3_NVM_VPD_LEN)
13206                 goto out_not_found;
13207
13208         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13209                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13210         if (j > 0) {
13211                 len = pci_vpd_info_field_size(&vpd_data[j]);
13212
13213                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13214                 if (j + len > block_end || len != 4 ||
13215                     memcmp(&vpd_data[j], "1028", 4))
13216                         goto partno;
13217
13218                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13219                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13220                 if (j < 0)
13221                         goto partno;
13222
13223                 len = pci_vpd_info_field_size(&vpd_data[j]);
13224
13225                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13226                 if (j + len > block_end)
13227                         goto partno;
13228
13229                 memcpy(tp->fw_ver, &vpd_data[j], len);
13230                 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13231         }
13232
13233 partno:
13234         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13235                                       PCI_VPD_RO_KEYWORD_PARTNO);
13236         if (i < 0)
13237                 goto out_not_found;
13238
13239         len = pci_vpd_info_field_size(&vpd_data[i]);
13240
13241         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13242         if (len > TG3_BPN_SIZE ||
13243             (len + i) > TG3_NVM_VPD_LEN)
13244                 goto out_not_found;
13245
13246         memcpy(tp->board_part_number, &vpd_data[i], len);
13247
13248 out_not_found:
13249         kfree(vpd_data);
13250         if (tp->board_part_number[0])
13251                 return;
13252
13253 out_no_vpd:
13254         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13255                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13256                         strcpy(tp->board_part_number, "BCM5717");
13257                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13258                         strcpy(tp->board_part_number, "BCM5718");
13259                 else
13260                         goto nomatch;
13261         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13262                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13263                         strcpy(tp->board_part_number, "BCM57780");
13264                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13265                         strcpy(tp->board_part_number, "BCM57760");
13266                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13267                         strcpy(tp->board_part_number, "BCM57790");
13268                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13269                         strcpy(tp->board_part_number, "BCM57788");
13270                 else
13271                         goto nomatch;
13272         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13273                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13274                         strcpy(tp->board_part_number, "BCM57761");
13275                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13276                         strcpy(tp->board_part_number, "BCM57765");
13277                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13278                         strcpy(tp->board_part_number, "BCM57781");
13279                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13280                         strcpy(tp->board_part_number, "BCM57785");
13281                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13282                         strcpy(tp->board_part_number, "BCM57791");
13283                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13284                         strcpy(tp->board_part_number, "BCM57795");
13285                 else
13286                         goto nomatch;
13287         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13288                 strcpy(tp->board_part_number, "BCM95906");
13289         } else {
13290 nomatch:
13291                 strcpy(tp->board_part_number, "none");
13292         }
13293 }
13294
13295 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13296 {
13297         u32 val;
13298
13299         if (tg3_nvram_read(tp, offset, &val) ||
13300             (val & 0xfc000000) != 0x0c000000 ||
13301             tg3_nvram_read(tp, offset + 4, &val) ||
13302             val != 0)
13303                 return 0;
13304
13305         return 1;
13306 }
13307
13308 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13309 {
13310         u32 val, offset, start, ver_offset;
13311         int i, dst_off;
13312         bool newver = false;
13313
13314         if (tg3_nvram_read(tp, 0xc, &offset) ||
13315             tg3_nvram_read(tp, 0x4, &start))
13316                 return;
13317
13318         offset = tg3_nvram_logical_addr(tp, offset);
13319
13320         if (tg3_nvram_read(tp, offset, &val))
13321                 return;
13322
13323         if ((val & 0xfc000000) == 0x0c000000) {
13324                 if (tg3_nvram_read(tp, offset + 4, &val))
13325                         return;
13326
13327                 if (val == 0)
13328                         newver = true;
13329         }
13330
13331         dst_off = strlen(tp->fw_ver);
13332
13333         if (newver) {
13334                 if (TG3_VER_SIZE - dst_off < 16 ||
13335                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13336                         return;
13337
13338                 offset = offset + ver_offset - start;
13339                 for (i = 0; i < 16; i += 4) {
13340                         __be32 v;
13341                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13342                                 return;
13343
13344                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13345                 }
13346         } else {
13347                 u32 major, minor;
13348
13349                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13350                         return;
13351
13352                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13353                         TG3_NVM_BCVER_MAJSFT;
13354                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13355                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13356                          "v%d.%02d", major, minor);
13357         }
13358 }
13359
13360 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13361 {
13362         u32 val, major, minor;
13363
13364         /* Use native endian representation */
13365         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13366                 return;
13367
13368         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13369                 TG3_NVM_HWSB_CFG1_MAJSFT;
13370         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13371                 TG3_NVM_HWSB_CFG1_MINSFT;
13372
13373         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13374 }
13375
13376 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13377 {
13378         u32 offset, major, minor, build;
13379
13380         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13381
13382         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13383                 return;
13384
13385         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13386         case TG3_EEPROM_SB_REVISION_0:
13387                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13388                 break;
13389         case TG3_EEPROM_SB_REVISION_2:
13390                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13391                 break;
13392         case TG3_EEPROM_SB_REVISION_3:
13393                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13394                 break;
13395         case TG3_EEPROM_SB_REVISION_4:
13396                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13397                 break;
13398         case TG3_EEPROM_SB_REVISION_5:
13399                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13400                 break;
13401         case TG3_EEPROM_SB_REVISION_6:
13402                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13403                 break;
13404         default:
13405                 return;
13406         }
13407
13408         if (tg3_nvram_read(tp, offset, &val))
13409                 return;
13410
13411         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13412                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13413         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13414                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13415         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13416
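        /* minor is printed as two digits and build numbers map to the
         * suffix letters 'a'-'z' below, hence the 99 and 26 bounds.
         */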
13417         if (minor > 99 || build > 26)
13418                 return;
13419
13420         offset = strlen(tp->fw_ver);
13421         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13422                  " v%d.%02d", major, minor);
13423
13424         if (build > 0) {
13425                 offset = strlen(tp->fw_ver);
13426                 if (offset < TG3_VER_SIZE - 1)
13427                         tp->fw_ver[offset] = 'a' + build - 1;
13428         }
13429 }
13430
13431 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13432 {
13433         u32 val, offset, start;
13434         int i, vlen;
13435
13436         for (offset = TG3_NVM_DIR_START;
13437              offset < TG3_NVM_DIR_END;
13438              offset += TG3_NVM_DIRENT_SIZE) {
13439                 if (tg3_nvram_read(tp, offset, &val))
13440                         return;
13441
13442                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13443                         break;
13444         }
13445
13446         if (offset == TG3_NVM_DIR_END)
13447                 return;
13448
13449         if (!tg3_flag(tp, 5705_PLUS))
13450                 start = 0x08000000;
13451         else if (tg3_nvram_read(tp, offset - 4, &start))
13452                 return;
13453
13454         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13455             !tg3_fw_img_is_valid(tp, offset) ||
13456             tg3_nvram_read(tp, offset + 8, &val))
13457                 return;
13458
13459         offset += val - start;
13460
13461         vlen = strlen(tp->fw_ver);
13462
13463         tp->fw_ver[vlen++] = ',';
13464         tp->fw_ver[vlen++] = ' ';
13465
13466         for (i = 0; i < 4; i++) {
13467                 __be32 v;
13468                 if (tg3_nvram_read_be32(tp, offset, &v))
13469                         return;
13470
13471                 offset += sizeof(v);
13472
13473                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13474                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13475                         break;
13476                 }
13477
13478                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13479                 vlen += sizeof(v);
13480         }
13481 }
13482
13483 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13484 {
13485         int vlen;
13486         u32 apedata;
13487         char *fwtype;
13488
13489         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13490                 return;
13491
13492         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13493         if (apedata != APE_SEG_SIG_MAGIC)
13494                 return;
13495
13496         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13497         if (!(apedata & APE_FW_STATUS_READY))
13498                 return;
13499
13500         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13501
13502         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13503                 tg3_flag_set(tp, APE_HAS_NCSI);
13504                 fwtype = "NCSI";
13505         } else {
13506                 fwtype = "DASH";
13507         }
13508
13509         vlen = strlen(tp->fw_ver);
13510
13511         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13512                  fwtype,
13513                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13514                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13515                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13516                  (apedata & APE_FW_VERSION_BLDMSK));
13517 }
13518
13519 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13520 {
13521         u32 val;
13522         bool vpd_vers = false;
13523
13524         if (tp->fw_ver[0] != 0)
13525                 vpd_vers = true;
13526
13527         if (tg3_flag(tp, NO_NVRAM)) {
13528                 strcat(tp->fw_ver, "sb");
13529                 return;
13530         }
13531
13532         if (tg3_nvram_read(tp, 0, &val))
13533                 return;
13534
13535         if (val == TG3_EEPROM_MAGIC)
13536                 tg3_read_bc_ver(tp);
13537         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13538                 tg3_read_sb_ver(tp, val);
13539         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13540                 tg3_read_hwsb_ver(tp);
13541         else
13542                 return;
13543
13544         if (vpd_vers)
13545                 goto done;
13546
13547         if (tg3_flag(tp, ENABLE_APE)) {
13548                 if (tg3_flag(tp, ENABLE_ASF))
13549                         tg3_read_dash_ver(tp);
13550         } else if (tg3_flag(tp, ENABLE_ASF)) {
13551                 tg3_read_mgmtfw_ver(tp);
13552         }
13553
13554 done:
13555         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13556 }
13557
13558 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13559
13560 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13561 {
13562         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13563                 return TG3_RX_RET_MAX_SIZE_5717;
13564         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13565                 return TG3_RX_RET_MAX_SIZE_5700;
13566         else
13567                 return TG3_RX_RET_MAX_SIZE_5705;
13568 }
13569
13570 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13571         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13572         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13573         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13574         { },
13575 };
13576
13577 static int __devinit tg3_get_invariants(struct tg3 *tp)
13578 {
13579         u32 misc_ctrl_reg;
13580         u32 pci_state_reg, grc_misc_cfg;
13581         u32 val;
13582         u16 pci_cmd;
13583         int err;
13584
13585         /* Force memory write invalidate off.  If we leave it on,
13586          * then on 5700_BX chips we have to enable a workaround.
13587          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13588          * to match the cacheline size.  The Broadcom driver has this
13589          * workaround but turns MWI off all the time and so never uses
13590          * it.  This suggests that the workaround is insufficient.
13591          */
13592         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13593         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13594         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13595
13596         /* Important! -- Make sure register accesses are byteswapped
13597          * correctly.  Also, for those chips that require it, make
13598          * sure that indirect register accesses are enabled before
13599          * the first operation.
13600          */
13601         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13602                               &misc_ctrl_reg);
13603         tp->misc_host_ctrl |= (misc_ctrl_reg &
13604                                MISC_HOST_CTRL_CHIPREV);
13605         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13606                                tp->misc_host_ctrl);
13607
13608         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13609                                MISC_HOST_CTRL_CHIPREV_SHIFT);
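        /* The chip revision id packs the ASIC revision in its upper
         * bits; e.g. 0x1002 (5703 rev A2) yields ASIC_REV_5703 from
         * GET_ASIC_REV() and CHIPREV_5703_AX from GET_CHIP_REV().
         */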
13610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13611                 u32 prod_id_asic_rev;
13612
13613                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13614                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13615                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13616                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13617                         pci_read_config_dword(tp->pdev,
13618                                               TG3PCI_GEN2_PRODID_ASICREV,
13619                                               &prod_id_asic_rev);
13620                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13621                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13622                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13623                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13624                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13625                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13626                         pci_read_config_dword(tp->pdev,
13627                                               TG3PCI_GEN15_PRODID_ASICREV,
13628                                               &prod_id_asic_rev);
13629                 else
13630                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13631                                               &prod_id_asic_rev);
13632
13633                 tp->pci_chip_rev_id = prod_id_asic_rev;
13634         }
13635
13636         /* Wrong chip ID in 5752 A0. This code can be removed later
13637          * as A0 is not in production.
13638          */
13639         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13640                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13641
13642         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13643          * we need to disable memory and use config. cycles
13644          * only to access all registers. The 5702/03 chips
13645          * can mistakenly decode the special cycles from the
13646          * ICH chipsets as memory write cycles, causing corruption
13647          * of register and memory space. Only certain ICH bridges
13648          * will drive special cycles with non-zero data during the
13649          * address phase which can fall within the 5703's address
13650          * range. This is not an ICH bug as the PCI spec allows
13651          * non-zero address during special cycles. However, only
13652          * these ICH bridges are known to drive non-zero addresses
13653          * during special cycles.
13654          *
13655          * Since special cycles do not cross PCI bridges, we only
13656          * enable this workaround if the 5703 is on the secondary
13657          * bus of these ICH bridges.
13658          */
13659         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13660             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13661                 static struct tg3_dev_id {
13662                         u32     vendor;
13663                         u32     device;
13664                         u32     rev;
13665                 } ich_chipsets[] = {
13666                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13667                           PCI_ANY_ID },
13668                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13669                           PCI_ANY_ID },
13670                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13671                           0xa },
13672                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13673                           PCI_ANY_ID },
13674                         { },
13675                 };
13676                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13677                 struct pci_dev *bridge = NULL;
13678
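                /* pci_get_device() drops the reference held on the
                 * cursor and takes one on the device it returns, so the
                 * loop visits each matching bridge exactly once;
                 * pci_dev_put() releases the last reference when we
                 * stop early.
                 */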
13679                 while (pci_id->vendor != 0) {
13680                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13681                                                 bridge);
13682                         if (!bridge) {
13683                                 pci_id++;
13684                                 continue;
13685                         }
13686                         if (pci_id->rev != PCI_ANY_ID) {
13687                                 if (bridge->revision > pci_id->rev)
13688                                         continue;
13689                         }
13690                         if (bridge->subordinate &&
13691                             (bridge->subordinate->number ==
13692                              tp->pdev->bus->number)) {
13693                                 tg3_flag_set(tp, ICH_WORKAROUND);
13694                                 pci_dev_put(bridge);
13695                                 break;
13696                         }
13697                 }
13698         }
13699
13700         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13701                 static struct tg3_dev_id {
13702                         u32     vendor;
13703                         u32     device;
13704                 } bridge_chipsets[] = {
13705                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13706                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13707                         { },
13708                 };
13709                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13710                 struct pci_dev *bridge = NULL;
13711
13712                 while (pci_id->vendor != 0) {
13713                         bridge = pci_get_device(pci_id->vendor,
13714                                                 pci_id->device,
13715                                                 bridge);
13716                         if (!bridge) {
13717                                 pci_id++;
13718                                 continue;
13719                         }
13720                         if (bridge->subordinate &&
13721                             (bridge->subordinate->number <=
13722                              tp->pdev->bus->number) &&
13723                             (bridge->subordinate->subordinate >=
13724                              tp->pdev->bus->number)) {
13725                                 tg3_flag_set(tp, 5701_DMA_BUG);
13726                                 pci_dev_put(bridge);
13727                                 break;
13728                         }
13729                 }
13730         }
13731
13732         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13733          * DMA addresses > 40-bit. This bridge may have other additional
13734          * 57xx devices behind it in some 4-port NIC designs for example.
13735          * Any tg3 device found behind the bridge will also need the 40-bit
13736          * DMA workaround.
13737          */
13738         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13739             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13740                 tg3_flag_set(tp, 5780_CLASS);
13741                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13742                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13743         } else {
13744                 struct pci_dev *bridge = NULL;
13745
13746                 do {
13747                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13748                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13749                                                 bridge);
13750                         if (bridge && bridge->subordinate &&
13751                             (bridge->subordinate->number <=
13752                              tp->pdev->bus->number) &&
13753                             (bridge->subordinate->subordinate >=
13754                              tp->pdev->bus->number)) {
13755                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13756                                 pci_dev_put(bridge);
13757                                 break;
13758                         }
13759                 } while (bridge);
13760         }
13761
13762         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13763             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13764                 tp->pdev_peer = tg3_find_peer(tp);
13765
13766         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13767             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13768             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13769                 tg3_flag_set(tp, 5717_PLUS);
13770
13771         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13772             tg3_flag(tp, 5717_PLUS))
13773                 tg3_flag_set(tp, 57765_PLUS);
13774
13775         /* Intentionally exclude ASIC_REV_5906 */
13776         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13777             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13778             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13779             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13780             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13781             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13782             tg3_flag(tp, 57765_PLUS))
13783                 tg3_flag_set(tp, 5755_PLUS);
13784
13785         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13786             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13787             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13788             tg3_flag(tp, 5755_PLUS) ||
13789             tg3_flag(tp, 5780_CLASS))
13790                 tg3_flag_set(tp, 5750_PLUS);
13791
13792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13793             tg3_flag(tp, 5750_PLUS))
13794                 tg3_flag_set(tp, 5705_PLUS);
13795
13796         /* Determine TSO capabilities */
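              /* (HW_TSO_1/2/3 select successive generations of the on-chip
               *  TSO engine; chips with none of them fall back to the
               *  firmware TSO engine loaded via tp->fw_needed below.)
               */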
13797         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13798                 ; /* Do nothing. HW bug. */
13799         else if (tg3_flag(tp, 57765_PLUS))
13800                 tg3_flag_set(tp, HW_TSO_3);
13801         else if (tg3_flag(tp, 5755_PLUS) ||
13802                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13803                 tg3_flag_set(tp, HW_TSO_2);
13804         else if (tg3_flag(tp, 5750_PLUS)) {
13805                 tg3_flag_set(tp, HW_TSO_1);
13806                 tg3_flag_set(tp, TSO_BUG);
13807                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13808                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13809                         tg3_flag_clear(tp, TSO_BUG);
13810         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13811                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13812                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13813                 tg3_flag_set(tp, TSO_BUG);
13814                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13815                         tp->fw_needed = FIRMWARE_TG3TSO5;
13816                 else
13817                         tp->fw_needed = FIRMWARE_TG3TSO;
13818         }
13819
13820         /* Selectively allow TSO based on operating conditions */
13821         if (tg3_flag(tp, HW_TSO_1) ||
13822             tg3_flag(tp, HW_TSO_2) ||
13823             tg3_flag(tp, HW_TSO_3) ||
13824             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13825                 tg3_flag_set(tp, TSO_CAPABLE);
13826         else {
13827                 tg3_flag_clear(tp, TSO_CAPABLE);
13828                 tg3_flag_clear(tp, TSO_BUG);
13829                 tp->fw_needed = NULL;
13830         }
13831
13832         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13833                 tp->fw_needed = FIRMWARE_TG3;
13834
13835         tp->irq_max = 1;
13836
13837         if (tg3_flag(tp, 5750_PLUS)) {
13838                 tg3_flag_set(tp, SUPPORT_MSI);
13839                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13840                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13841                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13842                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13843                      tp->pdev_peer == tp->pdev))
13844                         tg3_flag_clear(tp, SUPPORT_MSI);
13845
13846                 if (tg3_flag(tp, 5755_PLUS) ||
13847                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13848                         tg3_flag_set(tp, 1SHOT_MSI);
13849                 }
13850
13851                 if (tg3_flag(tp, 57765_PLUS)) {
13852                         tg3_flag_set(tp, SUPPORT_MSIX);
13853                         tp->irq_max = TG3_IRQ_MAX_VECS;
13854                 }
13855         }
13856
13857         if (tg3_flag(tp, 5755_PLUS))
13858                 tg3_flag_set(tp, SHORT_DMA_BUG);
13859
13860         if (tg3_flag(tp, 5717_PLUS))
13861                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13862
13863         if (tg3_flag(tp, 57765_PLUS) &&
13864             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13865                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13866
13867         if (!tg3_flag(tp, 5705_PLUS) ||
13868             tg3_flag(tp, 5780_CLASS) ||
13869             tg3_flag(tp, USE_JUMBO_BDFLAG))
13870                 tg3_flag_set(tp, JUMBO_CAPABLE);
13871
13872         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13873                               &pci_state_reg);
13874
13875         if (pci_is_pcie(tp->pdev)) {
13876                 u16 lnkctl;
13877
13878                 tg3_flag_set(tp, PCI_EXPRESS);
13879
13880                 tp->pcie_readrq = 4096;
13881                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13882                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13883                         tp->pcie_readrq = 2048;
13884
13885                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13886
13887                 pci_read_config_word(tp->pdev,
13888                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13889                                      &lnkctl);
13890                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13891                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13892                             ASIC_REV_5906) {
13893                                 tg3_flag_clear(tp, HW_TSO_2);
13894                                 tg3_flag_clear(tp, TSO_CAPABLE);
13895                         }
13896                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13897                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13898                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13899                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13900                                 tg3_flag_set(tp, CLKREQ_BUG);
13901                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13902                         tg3_flag_set(tp, L1PLLPD_EN);
13903                 }
13904         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13905                 /* BCM5785 devices are effectively PCIe devices, and should
13906                  * follow PCIe codepaths, but do not have a PCIe capabilities
13907                  * section.
13908                  */
13909                 tg3_flag_set(tp, PCI_EXPRESS);
13910         } else if (!tg3_flag(tp, 5705_PLUS) ||
13911                    tg3_flag(tp, 5780_CLASS)) {
13912                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13913                 if (!tp->pcix_cap) {
13914                         dev_err(&tp->pdev->dev,
13915                                 "Cannot find PCI-X capability, aborting\n");
13916                         return -EIO;
13917                 }
13918
13919                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13920                         tg3_flag_set(tp, PCIX_MODE);
13921         }
13922
13923         /* If we have an AMD 762 or VIA K8T800 chipset, write
13924          * reordering to the mailbox registers done by the host
13925          * controller can cause major troubles.  We read back from
13926          * every mailbox register write to force the writes to be
13927          * posted to the chip in order.
13928          */
13929         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13930             !tg3_flag(tp, PCI_EXPRESS))
13931                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13932
13933         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13934                              &tp->pci_cacheline_sz);
13935         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13936                              &tp->pci_lat_timer);
13937         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13938             tp->pci_lat_timer < 64) {
13939                 tp->pci_lat_timer = 64;
13940                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13941                                       tp->pci_lat_timer);
13942         }
13943
13944         /* Important! -- It is critical that the PCI-X hw workaround
13945          * situation is decided before the first MMIO register access.
13946          */
13947         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13948                 /* 5700 BX chips need to have their TX producer index
13949                  * mailboxes written twice to work around a bug.
13950                  */
13951                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13952
13953                 /* If we are in PCI-X mode, enable register write workaround.
13954                  *
13955                  * The workaround is to use indirect register accesses
13956                  * for all chip writes except those to mailbox registers.
13957                  */
13958                 if (tg3_flag(tp, PCIX_MODE)) {
13959                         u32 pm_reg;
13960
13961                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13962
13963                         /* The chip can have its power management PCI config
13964                          * space registers clobbered due to this bug.
13965                          * So explicitly force the chip into D0 here.
13966                          */
13967                         pci_read_config_dword(tp->pdev,
13968                                               tp->pm_cap + PCI_PM_CTRL,
13969                                               &pm_reg);
13970                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13971                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13972                         pci_write_config_dword(tp->pdev,
13973                                                tp->pm_cap + PCI_PM_CTRL,
13974                                                pm_reg);
13975
13976                         /* Also, force SERR#/PERR# in PCI command. */
13977                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13978                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13979                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13980                 }
13981         }
13982
13983         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13984                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13985         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13986                 tg3_flag_set(tp, PCI_32BIT);
13987
13988         /* Chip-specific fixup from Broadcom driver */
13989         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13990             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13991                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13992                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13993         }
13994
13995         /* Default fast path register access methods */
13996         tp->read32 = tg3_read32;
13997         tp->write32 = tg3_write32;
13998         tp->read32_mbox = tg3_read32;
13999         tp->write32_mbox = tg3_write32;
14000         tp->write32_tx_mbox = tg3_write32;
14001         tp->write32_rx_mbox = tg3_write32;
14002
14003         /* Various workaround register access methods */
14004         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14005                 tp->write32 = tg3_write_indirect_reg32;
14006         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14007                  (tg3_flag(tp, PCI_EXPRESS) &&
14008                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14009                 /*
14010                  * Back to back register writes can cause problems on these
14011                  * chips, the workaround is to read back all reg writes
14012                  * except those to mailbox regs.
14013                  *
14014                  * See tg3_write_indirect_reg32().
14015                  */
14016                 tp->write32 = tg3_write_flush_reg32;
14017         }
14018
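              /* tg3_write32_tx_mbox() writes the mailbox twice when
               * TXD_MBOX_HWBUG is set and reads it back when
               * MBOX_WRITE_REORDER is set; tg3_write_flush_reg32() is
               * essentially a writel() followed by a readl() of the same
               * register, forcing the posted write out to the chip in order.
               */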
14019         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14020                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14021                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14022                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14023         }
14024
14025         if (tg3_flag(tp, ICH_WORKAROUND)) {
14026                 tp->read32 = tg3_read_indirect_reg32;
14027                 tp->write32 = tg3_write_indirect_reg32;
14028                 tp->read32_mbox = tg3_read_indirect_mbox;
14029                 tp->write32_mbox = tg3_write_indirect_mbox;
14030                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14031                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14032
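                      /* Every access now goes through config space, so the
                       * MMIO mapping is no longer needed; drop it and
                       * disable memory-space decoding below.
                       */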
14033                 iounmap(tp->regs);
14034                 tp->regs = NULL;
14035
14036                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14037                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14038                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14039         }
14040         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14041                 tp->read32_mbox = tg3_read32_mbox_5906;
14042                 tp->write32_mbox = tg3_write32_mbox_5906;
14043                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14044                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14045         }
14046
14047         if (tp->write32 == tg3_write_indirect_reg32 ||
14048             (tg3_flag(tp, PCIX_MODE) &&
14049              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14050               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14051                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14052
14053         /* The memory arbiter has to be enabled in order for SRAM accesses
14054          * to succeed.  Normally on powerup the tg3 chip firmware will make
14055          * sure it is enabled, but other entities such as system netboot
14056          * code might disable it.
14057          */
14058         val = tr32(MEMARB_MODE);
14059         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14060
14061         if (tg3_flag(tp, PCIX_MODE)) {
14062                 pci_read_config_dword(tp->pdev,
14063                                       tp->pcix_cap + PCI_X_STATUS, &val);
14064                 tp->pci_fn = val & 0x7;
14065         } else {
14066                 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14067         }
14068
14069         /* Get eeprom hw config before calling tg3_set_power_state().
14070          * In particular, the TG3_FLAG_IS_NIC flag must be
14071          * determined before calling tg3_set_power_state() so that
14072          * we know whether or not to switch out of Vaux power.
14073          * When the flag is set, it means that GPIO1 is used for eeprom
14074          * write protect and also implies that it is a LOM where GPIOs
14075          * are not used to switch power.
14076          */
14077         tg3_get_eeprom_hw_cfg(tp);
14078
14079         if (tg3_flag(tp, ENABLE_APE)) {
14080                 /* Allow reads and writes to the
14081                  * APE register and memory space.
14082                  */
14083                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14084                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14085                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14086                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14087                                        pci_state_reg);
14088
14089                 tg3_ape_lock_init(tp);
14090         }
14091
14092         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14093             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14094             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14095             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14096             tg3_flag(tp, 57765_PLUS))
14097                 tg3_flag_set(tp, CPMU_PRESENT);
14098
14099         /* Set up tp->grc_local_ctrl before calling
14100          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14101          * will bring 5700's external PHY out of reset.
14102          * It is also used as eeprom write protect on LOMs.
14103          */
14104         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14105         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14106             tg3_flag(tp, EEPROM_WRITE_PROT))
14107                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14108                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14109         /* Unused GPIO3 must be driven as output on 5752 because there
14110          * are no pull-up resistors on unused GPIO pins.
14111          */
14112         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14113                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14114
14115         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14116             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14117             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14118                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14119
14120         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14121             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14122                 /* Turn off the debug UART. */
14123                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14124                 if (tg3_flag(tp, IS_NIC))
14125                         /* Keep VMain power. */
14126                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14127                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14128         }
14129
14130         /* Switch out of Vaux if it is a NIC */
14131         tg3_pwrsrc_switch_to_vmain(tp);
14132
14133         /* Derive initial jumbo mode from MTU assigned in
14134          * ether_setup() via the alloc_etherdev() call
14135          */
14136         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14137                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14138
14139         /* Determine WakeOnLan speed to use. */
14140         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14141             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14142             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14143             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14144                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14145         } else {
14146                 tg3_flag_set(tp, WOL_SPEED_100MB);
14147         }
14148
14149         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14150                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14151
14152         /* A few boards don't want the Ethernet@WireSpeed phy feature */
14153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14154             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14155              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14156              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14157             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14158             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14159                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14160
14161         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14162             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14163                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14164         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14165                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14166
14167         if (tg3_flag(tp, 5705_PLUS) &&
14168             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14169             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14170             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14171             !tg3_flag(tp, 57765_PLUS)) {
14172                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14173                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14174                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14175                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14176                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14177                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14178                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14179                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14180                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14181                 } else
14182                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14183         }
14184
14185         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14186             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14187                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14188                 if (tp->phy_otp == 0)
14189                         tp->phy_otp = TG3_OTP_DEFAULT;
14190         }
14191
14192         if (tg3_flag(tp, CPMU_PRESENT))
14193                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14194         else
14195                 tp->mi_mode = MAC_MI_MODE_BASE;
14196
14197         tp->coalesce_mode = 0;
14198         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14199             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14200                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14201
14202         /* Set these bits to enable statistics workaround. */
14203         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14204             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14205             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14206                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14207                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14208         }
14209
14210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14211             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14212                 tg3_flag_set(tp, USE_PHYLIB);
14213
14214         err = tg3_mdio_init(tp);
14215         if (err)
14216                 return err;
14217
14218         /* Initialize data/descriptor byte/word swapping. */
14219         val = tr32(GRC_MODE);
14220         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14221                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14222                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14223                         GRC_MODE_B2HRX_ENABLE |
14224                         GRC_MODE_HTX2B_ENABLE |
14225                         GRC_MODE_HOST_STACKUP);
14226         else
14227                 val &= GRC_MODE_HOST_STACKUP;
14228
14229         tw32(GRC_MODE, val | tp->grc_mode);
14230
14231         tg3_switch_clocks(tp);
14232
14233         /* Clear this out for sanity. */
14234         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14235
14236         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14237                               &pci_state_reg);
14238         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14239             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14240                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14241
14242                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14243                     chiprevid == CHIPREV_ID_5701_B0 ||
14244                     chiprevid == CHIPREV_ID_5701_B2 ||
14245                     chiprevid == CHIPREV_ID_5701_B5) {
14246                         void __iomem *sram_base;
14247
14248                         /* Write some dummy words into the SRAM status block
14249                          * area and see whether they read back correctly.  If the
14250                          * readback is bad, force-enable the PCIX workaround.
14251                          */
14252                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14253
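                              /* The 0xffffffff write to the second word
                               * must not disturb the first word; a nonzero
                               * readback of word 0 means target-mode writes
                               * are broken and the workaround must be
                               * forced on.
                               */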
14254                         writel(0x00000000, sram_base);
14255                         writel(0x00000000, sram_base + 4);
14256                         writel(0xffffffff, sram_base + 4);
14257                         if (readl(sram_base) != 0x00000000)
14258                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14259                 }
14260         }
14261
14262         udelay(50);
14263         tg3_nvram_init(tp);
14264
14265         grc_misc_cfg = tr32(GRC_MISC_CFG);
14266         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14267
14268         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14269             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14270              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14271                 tg3_flag_set(tp, IS_5788);
14272
14273         if (!tg3_flag(tp, IS_5788) &&
14274             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14275                 tg3_flag_set(tp, TAGGED_STATUS);
14276         if (tg3_flag(tp, TAGGED_STATUS)) {
14277                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14278                                       HOSTCC_MODE_CLRTICK_TXBD);
14279
14280                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14281                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14282                                        tp->misc_host_ctrl);
14283         }
14284
14285         /* Preserve the APE MAC_MODE bits */
14286         if (tg3_flag(tp, ENABLE_APE))
14287                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14288         else
14289                 tp->mac_mode = TG3_DEF_MAC_MODE;
14290
14291         /* these are limited to 10/100 only */
14292         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14293              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14294             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14295              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14296              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14297               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14298               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14299             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14300              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14301               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14302               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14303             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14304             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14305             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14306             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14307                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14308
14309         err = tg3_phy_probe(tp);
14310         if (err) {
14311                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14312                 /* ... but do not return immediately ... */
14313                 tg3_mdio_fini(tp);
14314         }
14315
14316         tg3_read_vpd(tp);
14317         tg3_read_fw_ver(tp);
14318
14319         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14320                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14321         } else {
14322                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14323                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14324                 else
14325                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14326         }
14327
14328         /* 5700 {AX,BX} chips have a broken status block link
14329          * change bit implementation, so we must use the
14330          * status register in those cases.
14331          */
14332         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14333                 tg3_flag_set(tp, USE_LINKCHG_REG);
14334         else
14335                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14336
14337         /* The led_ctrl is set during tg3_phy_probe; here we might
14338          * have to force the link status polling mechanism based
14339          * upon subsystem IDs.
14340          */
14341         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14342             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14343             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14344                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14345                 tg3_flag_set(tp, USE_LINKCHG_REG);
14346         }
14347
14348         /* For all SERDES we poll the MAC status register. */
14349         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14350                 tg3_flag_set(tp, POLL_SERDES);
14351         else
14352                 tg3_flag_clear(tp, POLL_SERDES);
14353
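              /* The 5701 in PCI-X mode apparently cannot DMA to
               * 2-byte-offset buffers, so the NET_IP_ALIGN pad is dropped;
               * without efficient unaligned access, the copy threshold is
               * maxed out so that every received packet is copied into an
               * aligned buffer.
               */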
14354         tp->rx_offset = NET_IP_ALIGN;
14355         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14356         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14357             tg3_flag(tp, PCIX_MODE)) {
14358                 tp->rx_offset = 0;
14359 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14360                 tp->rx_copy_thresh = ~(u16)0;
14361 #endif
14362         }
14363
14364         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14365         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14366         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14367
14368         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14369
14370         /* Increment the rx prod index on the rx std ring by at most
14371          * 8 for these chips to work around hw errata.
14372          */
14373         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14374             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14375             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14376                 tp->rx_std_max_post = 8;
14377
14378         if (tg3_flag(tp, ASPM_WORKAROUND))
14379                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14380                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14381
14382         return err;
14383 }
14384
14385 #ifdef CONFIG_SPARC
14386 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14387 {
14388         struct net_device *dev = tp->dev;
14389         struct pci_dev *pdev = tp->pdev;
14390         struct device_node *dp = pci_device_to_OF_node(pdev);
14391         const unsigned char *addr;
14392         int len;
14393
14394         addr = of_get_property(dp, "local-mac-address", &len);
14395         if (addr && len == 6) {
14396                 memcpy(dev->dev_addr, addr, 6);
14397                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14398                 return 0;
14399         }
14400         return -ENODEV;
14401 }
14402
14403 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14404 {
14405         struct net_device *dev = tp->dev;
14406
14407         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14408         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14409         return 0;
14410 }
14411 #endif
14412
14413 static int __devinit tg3_get_device_address(struct tg3 *tp)
14414 {
14415         struct net_device *dev = tp->dev;
14416         u32 hi, lo, mac_offset;
14417         int addr_ok = 0;
14418
14419 #ifdef CONFIG_SPARC
14420         if (!tg3_get_macaddr_sparc(tp))
14421                 return 0;
14422 #endif
14423
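              /* 0x7c is the usual NVRAM offset of the MAC address; dual-MAC
               * (5704/5780-class) and multi-function (5717+) devices keep
               * the other ports' addresses at different offsets.
               */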
14424         mac_offset = 0x7c;
14425         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14426             tg3_flag(tp, 5780_CLASS)) {
14427                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14428                         mac_offset = 0xcc;
14429                 if (tg3_nvram_lock(tp))
14430                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14431                 else
14432                         tg3_nvram_unlock(tp);
14433         } else if (tg3_flag(tp, 5717_PLUS)) {
14434                 if (tp->pci_fn & 1)
14435                         mac_offset = 0xcc;
14436                 if (tp->pci_fn > 1)
14437                         mac_offset += 0x18c;
14438         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14439                 mac_offset = 0x10;
14440
14441         /* First try to get it from MAC address mailbox. */
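              /* 0x484b in the upper half appears to be the bootcode's
               * signature (ASCII "HK") marking a valid MAC address in SRAM.
               */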
14442         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14443         if ((hi >> 16) == 0x484b) {
14444                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14445                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14446
14447                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14448                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14449                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14450                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14451                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14452
14453                 /* Some old bootcode may report a 0 MAC address in SRAM */
14454                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14455         }
14456         if (!addr_ok) {
14457                 /* Next, try NVRAM. */
14458                 if (!tg3_flag(tp, NO_NVRAM) &&
14459                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14460                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14461                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14462                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14463                 }
14464                 /* Finally just fetch it out of the MAC control regs. */
14465                 else {
14466                         hi = tr32(MAC_ADDR_0_HIGH);
14467                         lo = tr32(MAC_ADDR_0_LOW);
14468
14469                         dev->dev_addr[5] = lo & 0xff;
14470                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14471                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14472                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14473                         dev->dev_addr[1] = hi & 0xff;
14474                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14475                 }
14476         }
14477
14478         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14479 #ifdef CONFIG_SPARC
14480                 if (!tg3_get_default_macaddr_sparc(tp))
14481                         return 0;
14482 #endif
14483                 return -EINVAL;
14484         }
14485         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14486         return 0;
14487 }
14488
14489 #define BOUNDARY_SINGLE_CACHELINE       1
14490 #define BOUNDARY_MULTI_CACHELINE        2
14491
14492 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14493 {
14494         int cacheline_size;
14495         u8 byte;
14496         int goal;
14497
14498         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14499         if (byte == 0)
14500                 cacheline_size = 1024;
14501         else
14502                 cacheline_size = (int) byte * 4;
14503
14504         /* On 5703 and later chips, the boundary bits have no
14505          * effect.
14506          */
14507         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14508             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14509             !tg3_flag(tp, PCI_EXPRESS))
14510                 goto out;
14511
14512 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14513         goal = BOUNDARY_MULTI_CACHELINE;
14514 #else
14515 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14516         goal = BOUNDARY_SINGLE_CACHELINE;
14517 #else
14518         goal = 0;
14519 #endif
14520 #endif
14521
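              /* goal selects the clipping policy: BOUNDARY_SINGLE_CACHELINE
               * keeps each DMA burst within one cache line,
               * BOUNDARY_MULTI_CACHELINE permits larger aligned bursts, and
               * zero leaves the chip's defaults untouched.
               */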
14522         if (tg3_flag(tp, 57765_PLUS)) {
14523                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14524                 goto out;
14525         }
14526
14527         if (!goal)
14528                 goto out;
14529
14530         /* PCI controllers on most RISC systems tend to disconnect
14531          * when a device tries to burst across a cache-line boundary.
14532          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14533          *
14534          * Unfortunately, for PCI-E there are only limited
14535          * write-side controls for this, and thus for reads
14536          * we will still get the disconnects.  We'll also waste
14537          * these PCI cycles for both read and write for chips
14538          * other than 5700 and 5701, which do not implement the
14539          * boundary bits.
14540          */
14541         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14542                 switch (cacheline_size) {
14543                 case 16:
14544                 case 32:
14545                 case 64:
14546                 case 128:
14547                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14548                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14549                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14550                         } else {
14551                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14552                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14553                         }
14554                         break;
14555
14556                 case 256:
14557                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14558                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14559                         break;
14560
14561                 default:
14562                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14563                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14564                         break;
14565                 }
14566         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14567                 switch (cacheline_size) {
14568                 case 16:
14569                 case 32:
14570                 case 64:
14571                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14572                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14573                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14574                                 break;
14575                         }
14576                         /* fallthrough */
14577                 case 128:
14578                 default:
14579                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14580                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14581                         break;
14582                 }
14583         } else {
14584                 switch (cacheline_size) {
14585                 case 16:
14586                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14587                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14588                                         DMA_RWCTRL_WRITE_BNDRY_16);
14589                                 break;
14590                         }
14591                         /* fallthrough */
14592                 case 32:
14593                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14594                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14595                                         DMA_RWCTRL_WRITE_BNDRY_32);
14596                                 break;
14597                         }
14598                         /* fallthrough */
14599                 case 64:
14600                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14601                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14602                                         DMA_RWCTRL_WRITE_BNDRY_64);
14603                                 break;
14604                         }
14605                         /* fallthrough */
14606                 case 128:
14607                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14608                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14609                                         DMA_RWCTRL_WRITE_BNDRY_128);
14610                                 break;
14611                         }
14612                         /* fallthrough */
14613                 case 256:
14614                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14615                                 DMA_RWCTRL_WRITE_BNDRY_256);
14616                         break;
14617                 case 512:
14618                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14619                                 DMA_RWCTRL_WRITE_BNDRY_512);
14620                         break;
14621                 case 1024:
14622                 default:
14623                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14624                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14625                         break;
14626                 }
14627         }
14628
14629 out:
14630         return val;
14631 }
14632
14633 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14634 {
14635         struct tg3_internal_buffer_desc test_desc;
14636         u32 sram_dma_descs;
14637         int i, ret;
14638
14639         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14640
14641         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14642         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14643         tw32(RDMAC_STATUS, 0);
14644         tw32(WDMAC_STATUS, 0);
14645
14646         tw32(BUFMGR_MODE, 0);
14647         tw32(FTQ_RESET, 0);
14648
14649         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14650         test_desc.addr_lo = buf_dma & 0xffffffff;
14651         test_desc.nic_mbuf = 0x00002100;
14652         test_desc.len = size;
14653
14654         /*
14655          * HP ZX1 systems were seeing test failures for 5701 cards running
14656          * at 33MHz the *second* time the tg3 driver was loaded after an
14657          * initial scan.
14658          *
14659          * Broadcom tells me:
14660          *   ...the DMA engine is connected to the GRC block and a DMA
14661          *   reset may affect the GRC block in some unpredictable way...
14662          *   The behavior of resets to individual blocks has not been tested.
14663          *
14664          * Broadcom noted the GRC reset will also reset all sub-components.
14665          */
14666         if (to_device) {
14667                 test_desc.cqid_sqid = (13 << 8) | 2;
14668
14669                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14670                 udelay(40);
14671         } else {
14672                 test_desc.cqid_sqid = (16 << 8) | 7;
14673
14674                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14675                 udelay(40);
14676         }
14677         test_desc.flags = 0x00000005;
14678
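              /* Copy the descriptor into NIC SRAM one 32-bit word at a time
               * through the PCI config-space memory window, then hand it to
               * the appropriate DMA FTQ below.
               */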
14679         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14680                 u32 val;
14681
14682                 val = *(((u32 *)&test_desc) + i);
14683                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14684                                        sram_dma_descs + (i * sizeof(u32)));
14685                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14686         }
14687         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14688
14689         if (to_device)
14690                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14691         else
14692                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14693
14694         ret = -ENODEV;
14695         for (i = 0; i < 40; i++) {
14696                 u32 val;
14697
14698                 if (to_device)
14699                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14700                 else
14701                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14702                 if ((val & 0xffff) == sram_dma_descs) {
14703                         ret = 0;
14704                         break;
14705                 }
14706
14707                 udelay(100);
14708         }
14709
14710         return ret;
14711 }
14712
14713 #define TEST_BUFFER_SIZE        0x2000
14714
14715 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14716         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14717         { },
14718 };
14719
14720 static int __devinit tg3_test_dma(struct tg3 *tp)
14721 {
14722         dma_addr_t buf_dma;
14723         u32 *buf, saved_dma_rwctrl;
14724         int ret = 0;
14725
14726         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14727                                  &buf_dma, GFP_KERNEL);
14728         if (!buf) {
14729                 ret = -ENOMEM;
14730                 goto out_nofree;
14731         }
14732
14733         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14734                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14735
14736         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14737
14738         if (tg3_flag(tp, 57765_PLUS))
14739                 goto out;
14740
14741         if (tg3_flag(tp, PCI_EXPRESS)) {
14742                 /* DMA read watermark not used on PCIE */
14743                 tp->dma_rwctrl |= 0x00180000;
14744         } else if (!tg3_flag(tp, PCIX_MODE)) {
14745                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14746                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14747                         tp->dma_rwctrl |= 0x003f0000;
14748                 else
14749                         tp->dma_rwctrl |= 0x003f000f;
14750         } else {
14751                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14752                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14753                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14754                         u32 read_water = 0x7;
14755
14756                         /* If the 5704 is behind the EPB bridge, we can
14757                          * do the less restrictive ONE_DMA workaround for
14758                          * better performance.
14759                          */
14760                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14761                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14762                                 tp->dma_rwctrl |= 0x8000;
14763                         else if (ccval == 0x6 || ccval == 0x7)
14764                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14765
14766                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14767                                 read_water = 4;
14768                         /* Set bit 23 to enable PCIX hw bug fix */
14769                         tp->dma_rwctrl |=
14770                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14771                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14772                                 (1 << 23);
14773                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14774                         /* 5780 always in PCIX mode */
14775                         tp->dma_rwctrl |= 0x00144000;
14776                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14777                         /* 5714 always in PCIX mode */
14778                         tp->dma_rwctrl |= 0x00148000;
14779                 } else {
14780                         tp->dma_rwctrl |= 0x001b000f;
14781                 }
14782         }
14783
14784         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14785             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14786                 tp->dma_rwctrl &= 0xfffffff0;
14787
14788         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14789             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14790                 /* Remove this if it causes problems for some boards. */
14791                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14792
14793                 /* On 5700/5701 chips, we need to set this bit.
14794                  * Otherwise the chip will issue cacheline transactions
14795                  * to streamable DMA memory with not all the byte
14796                  * enables turned on.  This is an error on several
14797                  * RISC PCI controllers, in particular sparc64.
14798                  *
14799                  * On 5703/5704 chips, this bit has been reassigned
14800                  * a different meaning.  In particular, it is used
14801                  * on those chips to enable a PCI-X workaround.
14802                  */
14803                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14804         }
14805
14806         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14807
14808 #if 0
14809         /* Unneeded, already done by tg3_get_invariants.  */
14810         tg3_switch_clocks(tp);
14811 #endif
14812
14813         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14814             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14815                 goto out;
14816
14817         /* It is best to perform the DMA test with the maximum write burst
14818          * size to expose the 5700/5701 write DMA bug.
14819          */
14820         saved_dma_rwctrl = tp->dma_rwctrl;
14821         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14822         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14823
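              /* DMA a known pattern to the chip and read it back.  On a
               * mismatch, clamp the write boundary to 16 bytes once and
               * retry; if it still fails at 16 bytes, give up with -ENODEV.
               */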
14824         while (1) {
14825                 u32 *p = buf, i;
14826
14827                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14828                         p[i] = i;
14829
14830                 /* Send the buffer to the chip. */
14831                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14832                 if (ret) {
14833                         dev_err(&tp->pdev->dev,
14834                                 "%s: Buffer write failed. err = %d\n",
14835                                 __func__, ret);
14836                         break;
14837                 }
14838
14839 #if 0
14840                 /* validate data reached card RAM correctly. */
14841                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14842                         u32 val;
14843                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14844                         if (le32_to_cpu(val) != p[i]) {
14845                                 dev_err(&tp->pdev->dev,
14846                                         "%s: Buffer corrupted on device! "
14847                                         "(%d != %d)\n", __func__,
14848                                         le32_to_cpu(val), i);
14848                                 /* ret = -ENODEV here? */
14849                         }
14850                         p[i] = 0;
14851                 }
14852 #endif
14853                 /* Now read it back. */
14854                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14855                 if (ret) {
14856                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14857                                 "err = %d\n", __func__, ret);
14858                         break;
14859                 }
14860
14861                 /* Verify it. */
14862                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14863                         if (p[i] == i)
14864                                 continue;
14865
14866                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14867                             DMA_RWCTRL_WRITE_BNDRY_16) {
14868                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14869                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14870                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14871                                 break;
14872                         } else {
14873                                 dev_err(&tp->pdev->dev,
14874                                         "%s: Buffer corrupted on read back! "
14875                                         "(%d != %d)\n", __func__, p[i], i);
14876                                 ret = -ENODEV;
14877                                 goto out;
14878                         }
14879                 }
14880
14881                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14882                         /* Success. */
14883                         ret = 0;
14884                         break;
14885                 }
14886         }
14887         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14888             DMA_RWCTRL_WRITE_BNDRY_16) {
14889                 /* DMA test passed without adjusting DMA boundary,
14890                  * now look for chipsets that are known to expose the
14891                  * DMA bug without failing the test.
14892                  */
14893                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14894                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14895                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14896                 } else {
14897                         /* Safe to use the calculated DMA boundary. */
14898                         tp->dma_rwctrl = saved_dma_rwctrl;
14899                 }
14900
14901                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14902         }
14903
14904 out:
14905         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14906 out_nofree:
14907         return ret;
14908 }
14909
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

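		/* The low bits of TG3PCI_CLOCK_CTRL reflect the PCI-X
		 * clock the bus came up at; the decode below mirrors
		 * that encoding (0 = 33MHz, 2 = 50MHz, 4 = 66MHz,
		 * 6 = 100MHz, 7 = 133MHz).
		 */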
		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* The 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}

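/* Fill in the default interrupt coalescing parameters that are later
 * reported through ETHTOOL_GCOALESCE; chips that clear the coalescing
 * ticks on BD events, and the 5705+ family, get adjusted values.
 */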
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

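	/* These devices carry an Application Processing Engine (APE)
	 * for management firmware; its registers sit behind a
	 * separate BAR.
	 */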
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
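	/* Either the 64/40-bit mask was rejected or the device is
	 * 32-bit only; fall back to a 32-bit streaming mask.
	 */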
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

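	/* 5705 A1 parts without TSO on a slow PCI bus are limited to
	 * 64 pending receive BDs; MAX_RXPEND_64 also caps later
	 * ethtool ring-size changes.
	 */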
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset the chip in case the UNDI or EFI boot driver did not
	 * shut it down; otherwise the DMA self test will enable WDMAC
	 * and we'll see (spurious) pending DMA on the PCI bus at that
	 * point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
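		/* Advance to the next interrupt mailbox: the first
		 * four are spaced 8 bytes apart, the remaining ones
		 * only 4.
		 */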
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

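		/* Send-producer mailbox slots are 8 bytes wide; the
		 * -0x4/+0xc adjustment below walks sndmbx through their
		 * 32-bit halves (offsets 0x4, 0x0, 0xc, 0x8, ... from
		 * the first slot).
		 */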
		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume from a low-power mode. */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		cancel_work_sync(&tp->reset_task);

		if (!tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
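	/* If preparing for power-down failed, restart the hardware,
	 * timer and PHY so the interface stays usable, but still
	 * report the original error to the PM core.
	 */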
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Make sure the reset task cannot run. */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
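	/* The reset task may have re-armed RESTART_TIMER before it
	 * was cancelled, so clear it once more.
	 */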
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
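	/* Re-save config space immediately so a later reset or
	 * recovery pass still has a valid snapshot to restore.
	 */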
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);