tg3: Consolidate code that calls tg3_tx_set_bd()
[pandora-kernel.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70         return test_bit(flag, bits);
71 }
72
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75         set_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80         clear_bit(flag, bits);
81 }
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     119
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "May 18, 2011"
96
97 #define TG3_DEF_MAC_MODE        0
98 #define TG3_DEF_RX_MODE         0
99 #define TG3_DEF_TX_MODE         0
100 #define TG3_DEF_MSG_ENABLE        \
101         (NETIF_MSG_DRV          | \
102          NETIF_MSG_PROBE        | \
103          NETIF_MSG_LINK         | \
104          NETIF_MSG_TIMER        | \
105          NETIF_MSG_IFDOWN       | \
106          NETIF_MSG_IFUP         | \
107          NETIF_MSG_RX_ERR       | \
108          NETIF_MSG_TX_ERR)
109
110 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
111
112 /* length of time before we decide the hardware is borked,
113  * and dev->tx_timeout() should be called to fix the problem
114  */
115
116 #define TG3_TX_TIMEOUT                  (5 * HZ)
117
118 /* hardware minimum and maximum for a single frame's data payload */
119 #define TG3_MIN_MTU                     60
120 #define TG3_MAX_MTU(tp) \
121         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
122
123 /* These numbers seem to be hard coded in the NIC firmware somehow.
124  * You can't change the ring sizes, but you can change where you place
125  * them in the NIC onboard memory.
126  */
127 #define TG3_RX_STD_RING_SIZE(tp) \
128         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
129          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
130 #define TG3_DEF_RX_RING_PENDING         200
131 #define TG3_RX_JMB_RING_SIZE(tp) \
132         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
133          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
134 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
135 #define TG3_RSS_INDIR_TBL_SIZE          128
136
137 /* Do not place this n-ring entries value into the tp struct itself,
138  * we really want to expose these constants to GCC so that modulo et
139  * al.  operations are done with shifts and masks instead of with
140  * hw multiply/modulo instructions.  Another solution would be to
141  * replace things like '% foo' with '& (foo - 1)'.
142  */
143
144 #define TG3_TX_RING_SIZE                512
145 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
146
147 #define TG3_RX_STD_RING_BYTES(tp) \
148         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
149 #define TG3_RX_JMB_RING_BYTES(tp) \
150         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
151 #define TG3_RX_RCB_RING_BYTES(tp) \
152         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
153 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
154                                  TG3_TX_RING_SIZE)
155 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
156
157 #define TG3_DMA_BYTE_ENAB               64
158
159 #define TG3_RX_STD_DMA_SZ               1536
160 #define TG3_RX_JMB_DMA_SZ               9046
161
162 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
163
164 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
165 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
166
167 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
168         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
169
170 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
171         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
172
173 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
174  * that are at least dword aligned when used in PCIX mode.  The driver
175  * works around this bug by double copying the packet.  This workaround
176  * is built into the normal double copy length check for efficiency.
177  *
178  * However, the double copy is only necessary on those architectures
179  * where unaligned memory accesses are inefficient.  For those architectures
180  * where unaligned memory accesses incur little penalty, we can reintegrate
181  * the 5701 in the normal rx path.  Doing so saves a device structure
182  * dereference by hardcoding the double copy threshold in place.
183  */
184 #define TG3_RX_COPY_THRESHOLD           256
185 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
186         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
187 #else
188         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
189 #endif
190
191 /* minimum number of free TX descriptors required to wake up TX process */
192 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
193
194 #define TG3_RAW_IP_ALIGN 2
195
196 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
197
198 #define FIRMWARE_TG3            "tigon/tg3.bin"
199 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
200 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
201
202 static char version[] __devinitdata =
203         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
204
205 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
206 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
207 MODULE_LICENSE("GPL");
208 MODULE_VERSION(DRV_MODULE_VERSION);
209 MODULE_FIRMWARE(FIRMWARE_TG3);
210 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
211 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
212
213 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
214 module_param(tg3_debug, int, 0);
215 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
216
217 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
218         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
219         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
220         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
221         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
222         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
223         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
224         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
225         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
226         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
227         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
289         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
291         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
292         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
293         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
294         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
295         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
296         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
297         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
298         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
299         {}
300 };
301
302 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
303
304 static const struct {
305         const char string[ETH_GSTRING_LEN];
306 } ethtool_stats_keys[] = {
307         { "rx_octets" },
308         { "rx_fragments" },
309         { "rx_ucast_packets" },
310         { "rx_mcast_packets" },
311         { "rx_bcast_packets" },
312         { "rx_fcs_errors" },
313         { "rx_align_errors" },
314         { "rx_xon_pause_rcvd" },
315         { "rx_xoff_pause_rcvd" },
316         { "rx_mac_ctrl_rcvd" },
317         { "rx_xoff_entered" },
318         { "rx_frame_too_long_errors" },
319         { "rx_jabbers" },
320         { "rx_undersize_packets" },
321         { "rx_in_length_errors" },
322         { "rx_out_length_errors" },
323         { "rx_64_or_less_octet_packets" },
324         { "rx_65_to_127_octet_packets" },
325         { "rx_128_to_255_octet_packets" },
326         { "rx_256_to_511_octet_packets" },
327         { "rx_512_to_1023_octet_packets" },
328         { "rx_1024_to_1522_octet_packets" },
329         { "rx_1523_to_2047_octet_packets" },
330         { "rx_2048_to_4095_octet_packets" },
331         { "rx_4096_to_8191_octet_packets" },
332         { "rx_8192_to_9022_octet_packets" },
333
334         { "tx_octets" },
335         { "tx_collisions" },
336
337         { "tx_xon_sent" },
338         { "tx_xoff_sent" },
339         { "tx_flow_control" },
340         { "tx_mac_errors" },
341         { "tx_single_collisions" },
342         { "tx_mult_collisions" },
343         { "tx_deferred" },
344         { "tx_excessive_collisions" },
345         { "tx_late_collisions" },
346         { "tx_collide_2times" },
347         { "tx_collide_3times" },
348         { "tx_collide_4times" },
349         { "tx_collide_5times" },
350         { "tx_collide_6times" },
351         { "tx_collide_7times" },
352         { "tx_collide_8times" },
353         { "tx_collide_9times" },
354         { "tx_collide_10times" },
355         { "tx_collide_11times" },
356         { "tx_collide_12times" },
357         { "tx_collide_13times" },
358         { "tx_collide_14times" },
359         { "tx_collide_15times" },
360         { "tx_ucast_packets" },
361         { "tx_mcast_packets" },
362         { "tx_bcast_packets" },
363         { "tx_carrier_sense_errors" },
364         { "tx_discards" },
365         { "tx_errors" },
366
367         { "dma_writeq_full" },
368         { "dma_write_prioq_full" },
369         { "rxbds_empty" },
370         { "rx_discards" },
371         { "rx_errors" },
372         { "rx_threshold_hit" },
373
374         { "dma_readq_full" },
375         { "dma_read_prioq_full" },
376         { "tx_comp_queue_full" },
377
378         { "ring_set_send_prod_index" },
379         { "ring_status_update" },
380         { "nic_irqs" },
381         { "nic_avoided_irqs" },
382         { "nic_tx_threshold_hit" },
383
384         { "mbuf_lwm_thresh_hit" },
385 };
386
387 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
388
389
390 static const struct {
391         const char string[ETH_GSTRING_LEN];
392 } ethtool_test_keys[] = {
393         { "nvram test     (online) " },
394         { "link test      (online) " },
395         { "register test  (offline)" },
396         { "memory test    (offline)" },
397         { "loopback test  (offline)" },
398         { "interrupt test (offline)" },
399 };
400
401 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
402
403
404 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
405 {
406         writel(val, tp->regs + off);
407 }
408
409 static u32 tg3_read32(struct tg3 *tp, u32 off)
410 {
411         return readl(tp->regs + off);
412 }
413
414 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
415 {
416         writel(val, tp->aperegs + off);
417 }
418
419 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
420 {
421         return readl(tp->aperegs + off);
422 }
423
424 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
425 {
426         unsigned long flags;
427
428         spin_lock_irqsave(&tp->indirect_lock, flags);
429         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
430         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
431         spin_unlock_irqrestore(&tp->indirect_lock, flags);
432 }
433
434 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
435 {
436         writel(val, tp->regs + off);
437         readl(tp->regs + off);
438 }
439
440 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
441 {
442         unsigned long flags;
443         u32 val;
444
445         spin_lock_irqsave(&tp->indirect_lock, flags);
446         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
447         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
448         spin_unlock_irqrestore(&tp->indirect_lock, flags);
449         return val;
450 }
451
/* Mailbox write for chips where register access goes through PCI config
 * space.  Two hot mailboxes (the RX return consumer index and the
 * standard RX producer index) have dedicated config-space aliases and
 * bypass the shared indirect window entirely.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index: direct config-space alias. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Standard RX producer index: direct config-space alias. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	/* Everything else: 0x5600 maps the mailbox offset into the
	 * indirect register window; serialized by indirect_lock.
	 */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
481
482 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
483 {
484         unsigned long flags;
485         u32 val;
486
487         spin_lock_irqsave(&tp->indirect_lock, flags);
488         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
489         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
490         spin_unlock_irqrestore(&tp->indirect_lock, flags);
491         return val;
492 }
493
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);	/* read-back flushes the posted write */
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
517
518 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
519 {
520         tp->write32_mbox(tp, off, val);
521         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
522                 tp->read32_mbox(tp, off);
523 }
524
/* Write a TX mailbox register, applying per-chip workarounds: chips
 * flagged TXD_MBOX_HWBUG get the value written twice, and chips flagged
 * MBOX_WRITE_REORDER get a read-back to keep the write ordered.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);	/* hardware bug: repeat the write */
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);		/* flush to enforce write ordering */
}
534
535 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
536 {
537         return readl(tp->regs + off + GRCMBOX_BASE);
538 }
539
540 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
541 {
542         writel(val, tp->regs + off + GRCMBOX_BASE);
543 }
544
545 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
546 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
547 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
548 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
549 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
550
551 #define tw32(reg, val)                  tp->write32(tp, reg, val)
552 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
553 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
554 #define tr32(reg)                       tp->read32(tp, reg)
555
/* Write @val into on-chip SRAM at offset @off through the memory
 * window.  On the 5906, writes within the stats block address range
 * are silently dropped.  Window accesses are serialized with
 * indirect_lock; the window base is always restored to 0 afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906: skip the stats block region entirely. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Window driven via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window driven via flushed MMIO writes. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
580
/* Read on-chip SRAM at offset @off through the memory window into
 * *val.  On the 5906, reads within the stats block address range are
 * skipped and 0 is substituted.  Mirrors tg3_write_mem(): serialized
 * with indirect_lock, window base restored to 0 afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	/* 5906: stats block region reads back as 0. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Window driven via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window driven via flushed MMIO accesses. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
607
608 static void tg3_ape_lock_init(struct tg3 *tp)
609 {
610         int i;
611         u32 regbase, bit;
612
613         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
614                 regbase = TG3_APE_LOCK_GRANT;
615         else
616                 regbase = TG3_APE_PER_LOCK_GRANT;
617
618         /* Make sure the driver hasn't any stale locks. */
619         for (i = 0; i < 8; i++) {
620                 if (i == TG3_APE_LOCK_GPIO)
621                         continue;
622                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
623         }
624
625         /* Clear the correct bit of the GPIO lock too. */
626         if (!tp->pci_fn)
627                 bit = APE_LOCK_GRANT_DRIVER;
628         else
629                 bit = 1 << tp->pci_fn;
630
631         tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
632 }
633
/* Acquire an APE hardware lock.  Returns 0 on success (or when the APE
 * is not enabled), -EBUSY if the lock is not granted within ~1ms, and
 * -EINVAL for an unsupported lock number.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; report success immediately. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy lock register layout. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	/* GPIO lock requests use a per-PCI-function bit; all other locks
	 * (and function 0) use the common driver request bit.
	 */
	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
687
/* Release an APE lock previously acquired with tg3_ape_lock().
 * Silently ignores disabled APE and unsupported lock numbers.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock to release. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	/* 5761 uses the legacy grant register layout. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* GPIO lock uses a per-PCI-function grant bit; others (and
	 * function 0) use the common driver bit — matches tg3_ape_lock().
	 */
	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
718
719 static void tg3_disable_ints(struct tg3 *tp)
720 {
721         int i;
722
723         tw32(TG3PCI_MISC_HOST_CTRL,
724              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
725         for (i = 0; i < tp->irq_max; i++)
726                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
727 }
728
/* Re-enable chip interrupts: clear irq_sync, unmask the PCI interrupt,
 * re-arm each vector's mailbox with its last processed tag, then force
 * an initial interrupt (or a coalescing-now kick) so that any status
 * update that arrived while interrupts were off is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();		/* irq_sync must be visible before unmasking */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* In 1-shot MSI mode the mailbox write is repeated. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* Drop the first two vectors' bits from the coal-now mask. */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
759
760 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
761 {
762         struct tg3 *tp = tnapi->tp;
763         struct tg3_hw_status *sblk = tnapi->hw_status;
764         unsigned int work_exists = 0;
765
766         /* check for phy events */
767         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
768                 if (sblk->status & SD_STATUS_LINK_CHG)
769                         work_exists = 1;
770         }
771         /* check for RX/TX work to do */
772         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
773             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
774                 work_exists = 1;
775
776         return work_exists;
777 }
778
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Re-arm this vector with the last processed tag. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();	/* keep the mailbox write ordered vs later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
799
/* Program TG3PCI_CLOCK_CTRL back to the base clock configuration,
 * keeping only the CLKRUN bits and the low divisor field.
 *
 * No-op on CPMU-equipped and 5780-class chips.  On pre-5705 parts
 * running the 44MHz core clock, the register is stepped through
 * intermediate ALTCLK settings before the final value is written.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Preserve CLKRUN control and the low 5 divisor bits only. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down via ALTCLK in two writes, 40us settle each. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
832
#define PHY_BUSY_LOOPS  5000

/* Read PHY register @reg via the MAC's MII management interface.
 *
 * If MAC auto-polling is enabled it is temporarily turned off so
 * MI_COM can be driven by hand, then restored on exit.  The
 * transaction is busy-waited for up to PHY_BUSY_LOOPS * 10us.
 *
 * Returns 0 with *@val filled on success, or -EBUSY on timeout
 * (in which case *@val is left as 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Compose the MI frame: PHY address, register, read command. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Busy cleared; re-read after a short settle
			 * to pick up the final data bits.
			 */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore auto-polling if it was enabled on entry. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
883
/* Write @val to PHY register @reg via the MII management interface.
 *
 * On FET-class PHYs, writes to MII_CTRL1000 and MII_TG3_AUX_CTRL are
 * silently dropped (registers not applicable there).  Like
 * tg3_readphy(), auto-polling is paused around the manual MI_COM
 * transaction and the completion is busy-waited.
 *
 * Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Compose the MI frame: PHY address, register, data, write cmd. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore auto-polling if it was enabled on entry. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
932
933 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
934 {
935         int err;
936
937         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
938         if (err)
939                 goto done;
940
941         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
942         if (err)
943                 goto done;
944
945         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
946                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
947         if (err)
948                 goto done;
949
950         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
951
952 done:
953         return err;
954 }
955
956 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
957 {
958         int err;
959
960         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
961         if (err)
962                 goto done;
963
964         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
965         if (err)
966                 goto done;
967
968         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
969                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
970         if (err)
971                 goto done;
972
973         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
974
975 done:
976         return err;
977 }
978
979 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
980 {
981         int err;
982
983         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
984         if (!err)
985                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
986
987         return err;
988 }
989
990 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
991 {
992         int err;
993
994         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
995         if (!err)
996                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
997
998         return err;
999 }
1000
1001 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1002 {
1003         int err;
1004
1005         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1006                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1007                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1008         if (!err)
1009                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1010
1011         return err;
1012 }
1013
1014 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1015 {
1016         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1017                 set |= MII_TG3_AUXCTL_MISC_WREN;
1018
1019         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1020 }
1021
/* Enable/disable the PHY's shadow-mode DSP access via the AUXCTL
 * shadow register.  Note: no trailing semicolon inside the macro
 * bodies — the previous DISABLE variant expanded with a stray ';',
 * which double-terminates statements and breaks use in
 * "if (...) MACRO(tp); else ..." constructs.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1030
1031 static int tg3_bmcr_reset(struct tg3 *tp)
1032 {
1033         u32 phy_control;
1034         int limit, err;
1035
1036         /* OK, reset it, and poll the BMCR_RESET bit until it
1037          * clears or we time out.
1038          */
1039         phy_control = BMCR_RESET;
1040         err = tg3_writephy(tp, MII_BMCR, phy_control);
1041         if (err != 0)
1042                 return -EBUSY;
1043
1044         limit = 5000;
1045         while (limit--) {
1046                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1047                 if (err != 0)
1048                         return -EBUSY;
1049
1050                 if ((phy_control & BMCR_RESET) == 0) {
1051                         udelay(40);
1052                         break;
1053                 }
1054                 udelay(10);
1055         }
1056         if (limit < 0)
1057                 return -EBUSY;
1058
1059         return 0;
1060 }
1061
1062 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1063 {
1064         struct tg3 *tp = bp->priv;
1065         u32 val;
1066
1067         spin_lock_bh(&tp->lock);
1068
1069         if (tg3_readphy(tp, reg, &val))
1070                 val = -EIO;
1071
1072         spin_unlock_bh(&tp->lock);
1073
1074         return val;
1075 }
1076
1077 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1078 {
1079         struct tg3 *tp = bp->priv;
1080         u32 ret = 0;
1081
1082         spin_lock_bh(&tp->lock);
1083
1084         if (tg3_writephy(tp, reg, val))
1085                 ret = -EIO;
1086
1087         spin_unlock_bh(&tp->lock);
1088
1089         return ret;
1090 }
1091
/* mii_bus reset callback: nothing to do for tg3, report success. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1096
/* Program the 5785 MAC's PHY configuration registers (LED modes,
 * RGMII in-band signalling) to match the attached PHY type and
 * interface mode.  PHYs not in the switch below are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Pick the LED mode bits appropriate for the PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces need only the LED modes and timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII: enable in-band status unless explicitly disabled. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the external in-band RX/TX settings into the RGMII
	 * mode register.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1177
/* Turn off MAC auto-polling so MDIO transactions can be issued
 * manually; on 5785 parts with an initialized MDIO bus, reapply the
 * PHY-specific MAC configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1188
/* Determine the PHY address, and when phylib is in use, allocate and
 * register the MDIO bus, locate the PHY device, and apply per-PHY
 * interface/flag setup.
 *
 * Returns 0 on success or a negative errno (allocation or bus
 * registration failure, or no usable PHY found).
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ devices map the PHY address from the PCI
		 * function number; serdes ports sit 7 addresses up.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY interface mode and driver flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1293
1294 static void tg3_mdio_fini(struct tg3 *tp)
1295 {
1296         if (tg3_flag(tp, MDIOBUS_INITED)) {
1297                 tg3_flag_clear(tp, MDIOBUS_INITED);
1298                 mdiobus_unregister(tp->mdio_bus);
1299                 mdiobus_free(tp->mdio_bus);
1300         }
1301 }
1302
/* tp->lock is held. */
/* Raise the driver-event bit in GRC_RX_CPU_EVENT to signal the
 * firmware, and record when it was raised so later waits can be
 * shortened (see tg3_wait_for_event_ack()).
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1314
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for firmware to
 * acknowledge the previous driver event, i.e. for it to clear
 * GRC_RX_CPU_DRIVER_EVENT.  Time already elapsed since the event was
 * generated is credited against the wait.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8us steps; +1 guarantees at least one iteration. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1343
/* tp->lock is held. */
/* Report the current link state to the management firmware (5780
 * class with ASF only) by copying key MII registers into the
 * firmware command mailbox and raising a driver event.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR (high half) | BMSR (low half). */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: ADVERTISE | LPA. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: CTRL1000 | STAT1000 (copper only). */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHYADDR in the high half. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1390
/* Log the current link state (speed/duplex/flow control/EEE) and
 * forward it to the management firmware via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
1418
1419 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1420 {
1421         u16 miireg;
1422
1423         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1424                 miireg = ADVERTISE_PAUSE_CAP;
1425         else if (flow_ctrl & FLOW_CTRL_TX)
1426                 miireg = ADVERTISE_PAUSE_ASYM;
1427         else if (flow_ctrl & FLOW_CTRL_RX)
1428                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1429         else
1430                 miireg = 0;
1431
1432         return miireg;
1433 }
1434
1435 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1436 {
1437         u16 miireg;
1438
1439         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1440                 miireg = ADVERTISE_1000XPAUSE;
1441         else if (flow_ctrl & FLOW_CTRL_TX)
1442                 miireg = ADVERTISE_1000XPSE_ASYM;
1443         else if (flow_ctrl & FLOW_CTRL_RX)
1444                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1445         else
1446                 miireg = 0;
1447
1448         return miireg;
1449 }
1450
1451 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1452 {
1453         u8 cap = 0;
1454
1455         if (lcladv & ADVERTISE_1000XPAUSE) {
1456                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1457                         if (rmtadv & LPA_1000XPAUSE)
1458                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1459                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1460                                 cap = FLOW_CTRL_RX;
1461                 } else {
1462                         if (rmtadv & LPA_1000XPAUSE)
1463                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1464                 }
1465         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1466                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1467                         cap = FLOW_CTRL_TX;
1468         }
1469
1470         return cap;
1471 }
1472
/* Resolve the active flow-control settings from autonegotiation (or
 * the fixed configuration) and program the MAC RX/TX mode registers,
 * writing each register only if its value actually changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause resolution; copper
		 * uses the generic MII helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1511
/* phylib link-change callback: reprogram the MAC (port mode, duplex,
 * flow control, MI status, TX slot times) to match the PHY's current
 * state, then emit a link report if anything user-visible changed.
 * Runs under tp->lock; the report is printed after the lock drops.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select MII vs GMII port mode from the PHY speed. */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our config
			 * and the partner's advertised pause bits.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs a larger slot time. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Emit a link message if link state, speed, duplex, or flow
	 * control changed.
	 */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
1595
/* Connect the MAC to its PHY through phylib: reset the PHY, attach
 * with tg3_adjust_link() as the link-change handler, and mask the
 * advertised features down to what the MAC supports.
 *
 * Returns 0 (also when already connected), or a negative errno on
 * attach failure / unsupported interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1643
/* Start the attached PHY and kick off autonegotiation.  If the
 * device was in low-power mode, first restore the saved link
 * configuration to the phy_device.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1665
1666 static void tg3_phy_stop(struct tg3 *tp)
1667 {
1668         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1669                 return;
1670
1671         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1672 }
1673
1674 static void tg3_phy_fini(struct tg3 *tp)
1675 {
1676         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1677                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1678                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1679         }
1680 }
1681
1682 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1683 {
1684         u32 phytest;
1685
1686         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1687                 u32 phy;
1688
1689                 tg3_writephy(tp, MII_TG3_FET_TEST,
1690                              phytest | MII_TG3_FET_SHADOW_EN);
1691                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1692                         if (enable)
1693                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1694                         else
1695                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1696                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1697                 }
1698                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1699         }
1700 }
1701
1702 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1703 {
1704         u32 reg;
1705
1706         if (!tg3_flag(tp, 5705_PLUS) ||
1707             (tg3_flag(tp, 5717_PLUS) &&
1708              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1709                 return;
1710
1711         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1712                 tg3_phy_fet_toggle_apd(tp, enable);
1713                 return;
1714         }
1715
1716         reg = MII_TG3_MISC_SHDW_WREN |
1717               MII_TG3_MISC_SHDW_SCR5_SEL |
1718               MII_TG3_MISC_SHDW_SCR5_LPED |
1719               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1720               MII_TG3_MISC_SHDW_SCR5_SDTL |
1721               MII_TG3_MISC_SHDW_SCR5_C125OE;
1722         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1723                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1724
1725         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1726
1727
1728         reg = MII_TG3_MISC_SHDW_WREN |
1729               MII_TG3_MISC_SHDW_APD_SEL |
1730               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1731         if (enable)
1732                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1733
1734         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1735 }
1736
1737 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1738 {
1739         u32 phy;
1740
1741         if (!tg3_flag(tp, 5705_PLUS) ||
1742             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1743                 return;
1744
1745         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1746                 u32 ephy;
1747
1748                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1749                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1750
1751                         tg3_writephy(tp, MII_TG3_FET_TEST,
1752                                      ephy | MII_TG3_FET_SHADOW_EN);
1753                         if (!tg3_readphy(tp, reg, &phy)) {
1754                                 if (enable)
1755                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1756                                 else
1757                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1758                                 tg3_writephy(tp, reg, phy);
1759                         }
1760                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1761                 }
1762         } else {
1763                 int ret;
1764
1765                 ret = tg3_phy_auxctl_read(tp,
1766                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1767                 if (!ret) {
1768                         if (enable)
1769                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1770                         else
1771                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1772                         tg3_phy_auxctl_write(tp,
1773                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1774                 }
1775         }
1776 }
1777
1778 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1779 {
1780         int ret;
1781         u32 val;
1782
1783         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1784                 return;
1785
1786         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1787         if (!ret)
1788                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1789                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1790 }
1791
1792 static void tg3_phy_apply_otp(struct tg3 *tp)
1793 {
1794         u32 otp, phy;
1795
1796         if (!tp->phy_otp)
1797                 return;
1798
1799         otp = tp->phy_otp;
1800
1801         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1802                 return;
1803
1804         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1805         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1806         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1807
1808         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1809               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1810         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1811
1812         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1813         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1814         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1815
1816         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1817         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1818
1819         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1820         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1821
1822         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1823               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1824         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1825
1826         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1827 }
1828
/* Adjust Energy Efficient Ethernet (EEE) state after a link change.
 * If the link partner advertises EEE at 100/1000 full duplex, arm the
 * LPI countdown (tp->setlpicnt); otherwise make sure LPI is disabled.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	/* EEE is only usable on an autonegotiated full-duplex link at
	 * 100 or 1000 Mbps.
	 */
	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* Pick the LPI exit timer to match the link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the EEE resolution status via clause 45. */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* No EEE resolution: clear the DSP TAP26 knob (when the
		 * link is up and SMDSP access succeeds) and turn LPI off.
		 */
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
1871
1872 static void tg3_phy_eee_enable(struct tg3 *tp)
1873 {
1874         u32 val;
1875
1876         if (tp->link_config.active_speed == SPEED_1000 &&
1877             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1878              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1879              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1880             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1881                 val = MII_TG3_DSP_TAP26_ALNOKO |
1882                       MII_TG3_DSP_TAP26_RMRXSTO;
1883                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
1884                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1885         }
1886
1887         val = tr32(TG3_CPMU_EEE_MODE);
1888         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1889 }
1890
1891 static int tg3_wait_macro_done(struct tg3 *tp)
1892 {
1893         int limit = 100;
1894
1895         while (limit--) {
1896                 u32 tmp32;
1897
1898                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1899                         if ((tmp32 & 0x1000) == 0)
1900                                 break;
1901                 }
1902         }
1903         if (limit < 0)
1904                 return -EBUSY;
1905
1906         return 0;
1907 }
1908
/* Write a known test pattern into each of the four PHY DSP channels,
 * read it back, and verify it.  Sets *resetp when the PHY should be
 * reset and the sequence retried.  Returns 0 on success, -EBUSY on
 * any macro timeout or pattern mismatch.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel test pattern: three (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and start a write. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the write and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read the pattern back one (low, high) pair at a time
		 * and compare against what was written.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the recovery sequence
				 * before reporting failure.  No reset
				 * is requested in this case.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1974
1975 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1976 {
1977         int chan;
1978
1979         for (chan = 0; chan < 4; chan++) {
1980                 int i;
1981
1982                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1983                              (chan * 0x2000) | 0x0200);
1984                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1985                 for (i = 0; i < 6; i++)
1986                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1987                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1988                 if (tg3_wait_macro_done(tp))
1989                         return -EBUSY;
1990         }
1991
1992         return 0;
1993 }
1994
1995 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1996 {
1997         u32 reg32, phy9_orig;
1998         int retries, do_phy_reset, err;
1999
2000         retries = 10;
2001         do_phy_reset = 1;
2002         do {
2003                 if (do_phy_reset) {
2004                         err = tg3_bmcr_reset(tp);
2005                         if (err)
2006                                 return err;
2007                         do_phy_reset = 0;
2008                 }
2009
2010                 /* Disable transmitter and interrupt.  */
2011                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2012                         continue;
2013
2014                 reg32 |= 0x3000;
2015                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2016
2017                 /* Set full-duplex, 1000 mbps.  */
2018                 tg3_writephy(tp, MII_BMCR,
2019                              BMCR_FULLDPLX | BMCR_SPEED1000);
2020
2021                 /* Set to master mode.  */
2022                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2023                         continue;
2024
2025                 tg3_writephy(tp, MII_CTRL1000,
2026                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2027
2028                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2029                 if (err)
2030                         return err;
2031
2032                 /* Block the PHY control access.  */
2033                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2034
2035                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2036                 if (!err)
2037                         break;
2038         } while (--retries);
2039
2040         err = tg3_phy_reset_chanpat(tp);
2041         if (err)
2042                 return err;
2043
2044         tg3_phydsp_write(tp, 0x8005, 0x0000);
2045
2046         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2047         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2048
2049         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2050
2051         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2052
2053         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2054                 reg32 &= ~0x3000;
2055                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2056         } else if (!err)
2057                 err = -EBUSY;
2058
2059         return err;
2060 }
2061
/* Reset the tigon3 PHY and apply the chip- and PHY-specific
 * post-reset workarounds.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: take the ethernet PHY out of IDDQ (power-down) first. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice because link status is latched-low. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop the link; report it down now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the DSP test-pattern reset workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX), temporarily drop the GPHY 10MB-RX-only
	 * mode around the reset and restore it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX/5761-AX: clear the 12.5MHz gigabit MAC clock mode. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* None of the remaining fixups apply to 5717+ MII serdes. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Apply per-PHY erratum workarounds via DSP writes. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	/* Written twice intentionally for the 5704 A0 erratum. */
	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2202
2203 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2204 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2205 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2206                                           TG3_GPIO_MSG_NEED_VAUX)
2207 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2208         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2209          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2210          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2211          (TG3_GPIO_MSG_DRVR_PRES << 12))
2212
2213 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2214         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2215          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2216          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2217          (TG3_GPIO_MSG_NEED_VAUX << 12))
2218
2219 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2220 {
2221         u32 status, shift;
2222
2223         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2224             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2225                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2226         else
2227                 status = tr32(TG3_CPMU_DRV_STATUS);
2228
2229         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2230         status &= ~(TG3_GPIO_MSG_MASK << shift);
2231         status |= (newstat << shift);
2232
2233         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2234             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2235                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2236         else
2237                 tw32(TG3_CPMU_DRV_STATUS, status);
2238
2239         return status >> TG3_APE_GPIO_MSG_SHIFT;
2240 }
2241
2242 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2243 {
2244         if (!tg3_flag(tp, IS_NIC))
2245                 return 0;
2246
2247         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2249             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2250                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2251                         return -EIO;
2252
2253                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2254
2255                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2256                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2257
2258                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2259         } else {
2260                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2261                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2262         }
2263
2264         return 0;
2265 }
2266
2267 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2268 {
2269         u32 grc_local_ctrl;
2270
2271         if (!tg3_flag(tp, IS_NIC) ||
2272             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2273             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2274                 return;
2275
2276         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2277
2278         tw32_wait_f(GRC_LOCAL_CTRL,
2279                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2281
2282         tw32_wait_f(GRC_LOCAL_CTRL,
2283                     grc_local_ctrl,
2284                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2285
2286         tw32_wait_f(GRC_LOCAL_CTRL,
2287                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2288                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2289 }
2290
/* Switch the NIC's power source to the auxiliary (Vaux) supply by
 * driving the GRC local-control GPIOs in the board-specific sequence.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: a single write drives GPIO0/1 high. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		/* Three-step sequence: raise GPIO1/2, then GPIO0, then
		 * drop GPIO2 (when usable), with a settle delay between
		 * each step.
		 */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2367
2368 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2369 {
2370         u32 msg = 0;
2371
2372         /* Serialize power state transitions */
2373         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2374                 return;
2375
2376         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2377                 msg = TG3_GPIO_MSG_NEED_VAUX;
2378
2379         msg = tg3_set_function_status(tp, msg);
2380
2381         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2382                 goto done;
2383
2384         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2385                 tg3_pwrsrc_switch_to_vaux(tp);
2386         else
2387                 tg3_pwrsrc_die_with_vmain(tp);
2388
2389 done:
2390         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2391 }
2392
/* Decide whether the board should run from the auxiliary power source
 * (needed for ASF or, when include_wol is set, Wake-on-LAN), taking a
 * dual-port peer device's requirements into account.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	/* 5717-class chips arbitrate between functions via APE/CPMU. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* A fully initialized peer owns the decision. */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2437
2438 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2439 {
2440         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2441                 return 1;
2442         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2443                 if (speed != SPEED_10)
2444                         return 1;
2445         } else if (speed == SPEED_10)
2446                 return 1;
2447
2448         return 0;
2449 }
2450
static int tg3_setup_phy(struct tg3 *, int);

/* Reset "kind" codes passed to tg3_write_sig_post_reset() to tell the
 * firmware why the driver is resetting or shutting down the device.
 */
#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

/* Forward declarations for helpers defined later in this file. */
static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
2459
/* Put the PHY into its lowest-power state before the device powers
 * down.  @do_low_power selects extra aux-control power-saving writes
 * for copper PHYs.  On chips where powering the PHY down is known to
 * be buggy (5700, 5704, and 5780 with an MII SerDes), the final
 * BMCR power-down write is skipped.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* No MII PHY to power down.  On 5704, quiesce the SerDes
		 * by selecting HW autoneg and asserting soft reset.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put the embedded EPHY into
		 * IDDQ (deep power-down) mode via GRC_MISC_CFG.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Drop all advertisements and restart autoneg so
			 * the link drops before powering off.
			 */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Set the standby power-down bit in the shadowed
			 * AUXMODE4 register, then restore shadow mode.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		/* Force LEDs off and isolate the PHY in low-power mode. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Drop the 1000MB MAC clock to 12.5MHz on 5784/5761
		 * A-step parts before powering the PHY down.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2532
2533 /* tp->lock is held. */
2534 static int tg3_nvram_lock(struct tg3 *tp)
2535 {
2536         if (tg3_flag(tp, NVRAM)) {
2537                 int i;
2538
2539                 if (tp->nvram_lock_cnt == 0) {
2540                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2541                         for (i = 0; i < 8000; i++) {
2542                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2543                                         break;
2544                                 udelay(20);
2545                         }
2546                         if (i == 8000) {
2547                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2548                                 return -ENODEV;
2549                         }
2550                 }
2551                 tp->nvram_lock_cnt++;
2552         }
2553         return 0;
2554 }
2555
2556 /* tp->lock is held. */
2557 static void tg3_nvram_unlock(struct tg3 *tp)
2558 {
2559         if (tg3_flag(tp, NVRAM)) {
2560                 if (tp->nvram_lock_cnt > 0)
2561                         tp->nvram_lock_cnt--;
2562                 if (tp->nvram_lock_cnt == 0)
2563                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2564         }
2565 }
2566
2567 /* tp->lock is held. */
2568 static void tg3_enable_nvram_access(struct tg3 *tp)
2569 {
2570         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2571                 u32 nvaccess = tr32(NVRAM_ACCESS);
2572
2573                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2574         }
2575 }
2576
2577 /* tp->lock is held. */
2578 static void tg3_disable_nvram_access(struct tg3 *tp)
2579 {
2580         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2581                 u32 nvaccess = tr32(NVRAM_ACCESS);
2582
2583                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2584         }
2585 }
2586
2587 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2588                                         u32 offset, u32 *val)
2589 {
2590         u32 tmp;
2591         int i;
2592
2593         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2594                 return -EINVAL;
2595
2596         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2597                                         EEPROM_ADDR_DEVID_MASK |
2598                                         EEPROM_ADDR_READ);
2599         tw32(GRC_EEPROM_ADDR,
2600              tmp |
2601              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2602              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2603               EEPROM_ADDR_ADDR_MASK) |
2604              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2605
2606         for (i = 0; i < 1000; i++) {
2607                 tmp = tr32(GRC_EEPROM_ADDR);
2608
2609                 if (tmp & EEPROM_ADDR_COMPLETE)
2610                         break;
2611                 msleep(1);
2612         }
2613         if (!(tmp & EEPROM_ADDR_COMPLETE))
2614                 return -EBUSY;
2615
2616         tmp = tr32(GRC_EEPROM_DATA);
2617
2618         /*
2619          * The data will always be opposite the native endian
2620          * format.  Perform a blind byteswap to compensate.
2621          */
2622         *val = swab32(tmp);
2623
2624         return 0;
2625 }
2626
2627 #define NVRAM_CMD_TIMEOUT 10000
2628
2629 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2630 {
2631         int i;
2632
2633         tw32(NVRAM_CMD, nvram_cmd);
2634         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2635                 udelay(10);
2636                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2637                         udelay(10);
2638                         break;
2639                 }
2640         }
2641
2642         if (i == NVRAM_CMD_TIMEOUT)
2643                 return -EBUSY;
2644
2645         return 0;
2646 }
2647
2648 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2649 {
2650         if (tg3_flag(tp, NVRAM) &&
2651             tg3_flag(tp, NVRAM_BUFFERED) &&
2652             tg3_flag(tp, FLASH) &&
2653             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2654             (tp->nvram_jedecnum == JEDEC_ATMEL))
2655
2656                 addr = ((addr / tp->nvram_pagesize) <<
2657                         ATMEL_AT45DB0X1B_PAGE_POS) +
2658                        (addr % tp->nvram_pagesize);
2659
2660         return addr;
2661 }
2662
2663 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2664 {
2665         if (tg3_flag(tp, NVRAM) &&
2666             tg3_flag(tp, NVRAM_BUFFERED) &&
2667             tg3_flag(tp, FLASH) &&
2668             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2669             (tp->nvram_jedecnum == JEDEC_ATMEL))
2670
2671                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2672                         tp->nvram_pagesize) +
2673                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2674
2675         return addr;
2676 }
2677
2678 /* NOTE: Data read in from NVRAM is byteswapped according to
2679  * the byteswapping settings for all other register accesses.
2680  * tg3 devices are BE devices, so on a BE machine, the data
2681  * returned will be exactly as it is seen in NVRAM.  On a LE
2682  * machine, the 32-bit value will be byteswapped.
2683  */
2684 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2685 {
2686         int ret;
2687
2688         if (!tg3_flag(tp, NVRAM))
2689                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2690
2691         offset = tg3_nvram_phys_addr(tp, offset);
2692
2693         if (offset > NVRAM_ADDR_MSK)
2694                 return -EINVAL;
2695
2696         ret = tg3_nvram_lock(tp);
2697         if (ret)
2698                 return ret;
2699
2700         tg3_enable_nvram_access(tp);
2701
2702         tw32(NVRAM_ADDR, offset);
2703         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2704                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2705
2706         if (ret == 0)
2707                 *val = tr32(NVRAM_RDDATA);
2708
2709         tg3_disable_nvram_access(tp);
2710
2711         tg3_nvram_unlock(tp);
2712
2713         return ret;
2714 }
2715
2716 /* Ensures NVRAM data is in bytestream format. */
2717 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2718 {
2719         u32 v;
2720         int res = tg3_nvram_read(tp, offset, &v);
2721         if (!res)
2722                 *val = cpu_to_be32(v);
2723         return res;
2724 }
2725
2726 /* tp->lock is held. */
2727 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2728 {
2729         u32 addr_high, addr_low;
2730         int i;
2731
2732         addr_high = ((tp->dev->dev_addr[0] << 8) |
2733                      tp->dev->dev_addr[1]);
2734         addr_low = ((tp->dev->dev_addr[2] << 24) |
2735                     (tp->dev->dev_addr[3] << 16) |
2736                     (tp->dev->dev_addr[4] <<  8) |
2737                     (tp->dev->dev_addr[5] <<  0));
2738         for (i = 0; i < 4; i++) {
2739                 if (i == 1 && skip_mac_1)
2740                         continue;
2741                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2742                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2743         }
2744
2745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2746             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2747                 for (i = 0; i < 12; i++) {
2748                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2749                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2750                 }
2751         }
2752
2753         addr_high = (tp->dev->dev_addr[0] +
2754                      tp->dev->dev_addr[1] +
2755                      tp->dev->dev_addr[2] +
2756                      tp->dev->dev_addr[3] +
2757                      tp->dev->dev_addr[4] +
2758                      tp->dev->dev_addr[5]) &
2759                 TX_BACKOFF_SEED_MASK;
2760         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2761 }
2762
2763 static void tg3_enable_register_access(struct tg3 *tp)
2764 {
2765         /*
2766          * Make sure register accesses (indirect or otherwise) will function
2767          * correctly.
2768          */
2769         pci_write_config_dword(tp->pdev,
2770                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2771 }
2772
2773 static int tg3_power_up(struct tg3 *tp)
2774 {
2775         int err;
2776
2777         tg3_enable_register_access(tp);
2778
2779         err = pci_set_power_state(tp->pdev, PCI_D0);
2780         if (!err) {
2781                 /* Switch out of Vaux if it is a NIC */
2782                 tg3_pwrsrc_switch_to_vmain(tp);
2783         } else {
2784                 netdev_err(tp->dev, "Transition to D0 failed\n");
2785         }
2786
2787         return err;
2788 }
2789
/* Quiesce the device for a low-power state: restore register access,
 * put the PHY into a wake-capable or powered-down configuration,
 * notify firmware via the WoL mailbox, configure the MAC to receive
 * wake-up packets when wake is enabled, gate device clocks where the
 * chip allows it, and switch the power source.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the device is down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib path: save the current link config, then
		 * restrict advertisement to the low-power/WoL modes and
		 * restart autoneg.
		 */
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Broadcom OUI PHYs (other than AC131) need the
			 * extra low-power programming below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		/* Legacy (non-phylib) path: save the link config and
		 * force a 10/half autoneg setup on copper PHYs.
		 */
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait up to ~200ms for the firmware mailbox to signal
		 * readiness before shutting down.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		/* Configure the MAC port mode and enable magic-packet
		 * reception so the chip can wake the system.
		 */
		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate clocks where the chip generation allows it. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two staged writes, 40us settle time each. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down when neither wake-up nor ASF
	 * needs it alive.
	 */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU; only unlock NVRAM if the lock
			 * was actually obtained.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3035
/* Fully power the device down: run the shutdown preparation sequence,
 * arm PCI wake-from-D3 if Wake-on-LAN is enabled, then enter D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	/* WOL_ENABLE is sampled after the prepare step. */
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3043
3044 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3045 {
3046         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3047         case MII_TG3_AUX_STAT_10HALF:
3048                 *speed = SPEED_10;
3049                 *duplex = DUPLEX_HALF;
3050                 break;
3051
3052         case MII_TG3_AUX_STAT_10FULL:
3053                 *speed = SPEED_10;
3054                 *duplex = DUPLEX_FULL;
3055                 break;
3056
3057         case MII_TG3_AUX_STAT_100HALF:
3058                 *speed = SPEED_100;
3059                 *duplex = DUPLEX_HALF;
3060                 break;
3061
3062         case MII_TG3_AUX_STAT_100FULL:
3063                 *speed = SPEED_100;
3064                 *duplex = DUPLEX_FULL;
3065                 break;
3066
3067         case MII_TG3_AUX_STAT_1000HALF:
3068                 *speed = SPEED_1000;
3069                 *duplex = DUPLEX_HALF;
3070                 break;
3071
3072         case MII_TG3_AUX_STAT_1000FULL:
3073                 *speed = SPEED_1000;
3074                 *duplex = DUPLEX_FULL;
3075                 break;
3076
3077         default:
3078                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3079                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3080                                  SPEED_10;
3081                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3082                                   DUPLEX_HALF;
3083                         break;
3084                 }
3085                 *speed = SPEED_INVALID;
3086                 *duplex = DUPLEX_INVALID;
3087                 break;
3088         }
3089 }
3090
/* Program autoneg advertisement registers on a copper PHY.
 * @advertise is an ADVERTISED_* link-mode mask, @flowctrl a
 * FLOW_CTRL_* mask.  Gigabit advertisement is skipped on 10/100-only
 * PHYs, and EEE advertisement is configured on EEE-capable devices.
 * Returns 0 on success or the first PHY access error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* 10/100 and flow-control advertisement (MII_ADVERTISE). */
	new_adv = ADVERTISE_CSMA;
	if (advertise & ADVERTISED_10baseT_Half)
		new_adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		new_adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		new_adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		new_adv |= ADVERTISE_100FULL;

	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	/* Gigabit advertisement (MII_CTRL1000). */
	new_adv = 0;
	if (advertise & ADVERTISED_1000baseT_Half)
		new_adv |= ADVERTISE_1000HALF;
	if (advertise & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000FULL;

	/* 5701 A0/B0 parts must advertise as link master. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI requests while reconfiguring EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		/* Per-ASIC DSP fixups for EEE. */
		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always disable SMDSP; keep the first error seen. */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
3175
3176 static void tg3_phy_copper_begin(struct tg3 *tp)
3177 {
3178         u32 new_adv;
3179         int i;
3180
3181         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3182                 new_adv = ADVERTISED_10baseT_Half |
3183                           ADVERTISED_10baseT_Full;
3184                 if (tg3_flag(tp, WOL_SPEED_100MB))
3185                         new_adv |= ADVERTISED_100baseT_Half |
3186                                    ADVERTISED_100baseT_Full;
3187
3188                 tg3_phy_autoneg_cfg(tp, new_adv,
3189                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3190         } else if (tp->link_config.speed == SPEED_INVALID) {
3191                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3192                         tp->link_config.advertising &=
3193                                 ~(ADVERTISED_1000baseT_Half |
3194                                   ADVERTISED_1000baseT_Full);
3195
3196                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3197                                     tp->link_config.flowctrl);
3198         } else {
3199                 /* Asking for a specific link mode. */
3200                 if (tp->link_config.speed == SPEED_1000) {
3201                         if (tp->link_config.duplex == DUPLEX_FULL)
3202                                 new_adv = ADVERTISED_1000baseT_Full;
3203                         else
3204                                 new_adv = ADVERTISED_1000baseT_Half;
3205                 } else if (tp->link_config.speed == SPEED_100) {
3206                         if (tp->link_config.duplex == DUPLEX_FULL)
3207                                 new_adv = ADVERTISED_100baseT_Full;
3208                         else
3209                                 new_adv = ADVERTISED_100baseT_Half;
3210                 } else {
3211                         if (tp->link_config.duplex == DUPLEX_FULL)
3212                                 new_adv = ADVERTISED_10baseT_Full;
3213                         else
3214                                 new_adv = ADVERTISED_10baseT_Half;
3215                 }
3216
3217                 tg3_phy_autoneg_cfg(tp, new_adv,
3218                                     tp->link_config.flowctrl);
3219         }
3220
3221         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3222             tp->link_config.speed != SPEED_INVALID) {
3223                 u32 bmcr, orig_bmcr;
3224
3225                 tp->link_config.active_speed = tp->link_config.speed;
3226                 tp->link_config.active_duplex = tp->link_config.duplex;
3227
3228                 bmcr = 0;
3229                 switch (tp->link_config.speed) {
3230                 default:
3231                 case SPEED_10:
3232                         break;
3233
3234                 case SPEED_100:
3235                         bmcr |= BMCR_SPEED100;
3236                         break;
3237
3238                 case SPEED_1000:
3239                         bmcr |= BMCR_SPEED1000;
3240                         break;
3241                 }
3242
3243                 if (tp->link_config.duplex == DUPLEX_FULL)
3244                         bmcr |= BMCR_FULLDPLX;
3245
3246                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3247                     (bmcr != orig_bmcr)) {
3248                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3249                         for (i = 0; i < 1500; i++) {
3250                                 u32 tmp;
3251
3252                                 udelay(10);
3253                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3254                                     tg3_readphy(tp, MII_BMSR, &tmp))
3255                                         continue;
3256                                 if (!(tmp & BMSR_LSTATUS)) {
3257                                         udelay(40);
3258                                         break;
3259                                 }
3260                         }
3261                         tg3_writephy(tp, MII_BMCR, bmcr);
3262                         udelay(40);
3263                 }
3264         } else {
3265                 tg3_writephy(tp, MII_BMCR,
3266                              BMCR_ANENABLE | BMCR_ANRESTART);
3267         }
3268 }
3269
3270 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3271 {
3272         int err;
3273
3274         /* Turn off tap power management. */
3275         /* Set Extended packet length bit */
3276         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3277
3278         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3279         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3280         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3281         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3282         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3283
3284         udelay(40);
3285
3286         return err;
3287 }
3288
3289 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3290 {
3291         u32 adv_reg, all_mask = 0;
3292
3293         if (mask & ADVERTISED_10baseT_Half)
3294                 all_mask |= ADVERTISE_10HALF;
3295         if (mask & ADVERTISED_10baseT_Full)
3296                 all_mask |= ADVERTISE_10FULL;
3297         if (mask & ADVERTISED_100baseT_Half)
3298                 all_mask |= ADVERTISE_100HALF;
3299         if (mask & ADVERTISED_100baseT_Full)
3300                 all_mask |= ADVERTISE_100FULL;
3301
3302         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3303                 return 0;
3304
3305         if ((adv_reg & all_mask) != all_mask)
3306                 return 0;
3307         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3308                 u32 tg3_ctrl;
3309
3310                 all_mask = 0;
3311                 if (mask & ADVERTISED_1000baseT_Half)
3312                         all_mask |= ADVERTISE_1000HALF;
3313                 if (mask & ADVERTISED_1000baseT_Full)
3314                         all_mask |= ADVERTISE_1000FULL;
3315
3316                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3317                         return 0;
3318
3319                 if ((tg3_ctrl & all_mask) != all_mask)
3320                         return 0;
3321         }
3322         return 1;
3323 }
3324
3325 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3326 {
3327         u32 curadv, reqadv;
3328
3329         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3330                 return 1;
3331
3332         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3333         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3334
3335         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3336                 if (curadv != reqadv)
3337                         return 0;
3338
3339                 if (tg3_flag(tp, PAUSE_AUTONEG))
3340                         tg3_readphy(tp, MII_LPA, rmtadv);
3341         } else {
3342                 /* Reprogram the advertisement register, even if it
3343                  * does not affect the current link.  If the link
3344                  * gets renegotiated in the future, we can save an
3345                  * additional renegotiation cycle by advertising
3346                  * it correctly in the first place.
3347                  */
3348                 if (curadv != reqadv) {
3349                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3350                                      ADVERTISE_PAUSE_ASYM);
3351                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3352                 }
3353         }
3354
3355         return 1;
3356 }
3357
/* Evaluate and (re)establish link on a copper PHY, then program the
 * MAC mode/port registers to match the negotiated speed and duplex.
 *
 * @force_reset: non-zero to reset the PHY before link evaluation.
 * Returns 0, or a negative error propagated from the BCM5401 DSP
 * init / PHY reset helpers.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Ack any pending MAC link/config attention bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Turn off MI auto-polling while we access the PHY directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched-low; read twice so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: reprogram the 5401 DSP, then
			 * wait up to ~10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0: if a gigabit link did not come
			 * back, do a full reset + DSP reload.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	/* Unmask only the link-change interrupt when MI interrupts are
	 * in use; otherwise mask everything (non-FET PHYs).
	 */
	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		/* Set bit 10 of the misc-test shadow register if it is
		 * clear, then skip straight to renegotiation.
		 */
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll (up to 100 tries) for link-up in BMSR. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for the aux status register to go non-zero. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a sane BMCR value (not 0, not 0x7fff). */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg: link is usable only if the PHY is
			 * advertising everything we asked for and the
			 * flow control advertisement checks out.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: link is usable only if current
			 * speed/duplex/flowctrl match the request.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		/* Restart negotiation, then re-check link status. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program MAC port mode: MII for 10/100, GMII otherwise. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Tell the firmware (via its mailbox) when a gigabit link is
	 * up on 5700 in PCI-X / high-speed-PCI configurations.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		/* Disable PCIe CLKREQ at 10/100 speeds, enable it
		 * otherwise; write back only on change.
		 */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	/* Report carrier changes to the network stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
3636
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().
 * NOTE(review): the MR_* flag names appear to follow the IEEE 802.3
 * Clause 37 management variables — confirm against the spec.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* Control/status flags; the MR_LP_ADV_* bits record the link
	 * partner's abilities decoded from the received config word.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks (cur_time is incremented
	 * once per tg3_fiber_aneg_smachine() call).
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last received config word */
	int ability_match_count;	/* consecutive identical words seen */

	char ability_match, idle_match, ack_match;

	/* Config words sent to / received from the link partner. */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks to dwell in the RESTART / COMPLETE_ACK / IDLE_DETECT states. */
#define ANEG_STATE_SETTLE_TIME  10000
3700
/* Run one tick of the software 1000BASE-X autonegotiation state
 * machine.  Samples the received config word from the MAC, advances
 * ap->state, and programs MAC_TX_AUTO_NEG / MAC_MODE as needed.
 *
 * Returns ANEG_OK (keep going), ANEG_TIMER_ENAB (keep going, timer
 * armed), ANEG_DONE (negotiation finished) or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: start from a clean slate. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word and update the ability /
	 * ack / idle match trackers.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			/* Same word seen more than once => stable. */
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word being received: line is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Begin transmitting an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold here until the settle time elapses. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-zero word from the partner. */
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Ack'ed word must match the stable word
			 * (ignoring the ack bit itself).
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner went silent; restart negotiation. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's abilities into MR_LP_ADV_*. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not supported
				 * unless neither side wants one.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop transmitting config words; wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3952
/* Drive the software fiber autoneg state machine to completion, with
 * a ~195ms overall budget (195000 ticks of udelay(1)).
 *
 * On return, *txflags holds the config word we transmitted and
 * *rxflags the MR_* flags (including the partner's decoded
 * abilities).  Returns 1 when negotiation completed and the flags
 * indicate a usable link, 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear the tx config word and force GMII port mode with
	 * config-word transmission enabled before starting.
	 */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	/* Tick the state machine until it finishes or time runs out. */
	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Stop sending config words. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
3997
/* Initialize the BCM8002 SERDES PHY with its vendor-specific register
 * sequence: reset, channel/config selection, auto-lock setup and a
 * POR pulse.  The register numbers and values are undocumented magic
 * from the vendor driver — do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4047
/* Configure fiber link using the hardware SG_DIG autoneg engine.
 *
 * Handles both forced-link (autoneg disabled) and hardware
 * autonegotiation, including the MAC_SERDES_CFG workaround needed on
 * all chips except 5704 A0/A1, and link-up via parallel detection
 * when the partner does not send config code words.
 *
 * @mac_status: a MAC_STATUS snapshot taken by the caller; re-read
 *              internally where a fresh value is required.
 *
 * Returns 1 if the link is up after this pass, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All chips except 5704 A0/A1 need the serdes_cfg workaround. */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down HW autoneg if it was active. */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* Magic serdes config differs per port. */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If we came up via parallel detect and still have PCS
		 * sync without config words, keep the link up while the
		 * serdes counter runs down instead of restarting autoneg.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while writing the wanted autoneg setup. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate SG_DIG pause bits into MII-style
			 * advertisement words for flow control resolution.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: fall back to trying
				 * parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal detect: re-arm the autoneg timer. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
4189
/* Configure fiber link without the hardware SG_DIG autoneg engine.
 *
 * If autoneg is enabled, runs the software state machine via
 * fiber_autoneg() and resolves flow control from the exchanged config
 * words; otherwise forces a 1000-full link.  In both autoneg outcomes
 * a link can also be accepted via parallel detection (PCS synced and
 * no config words received).
 *
 * @mac_status: a MAC_STATUS snapshot taken by the caller.
 *
 * Returns 1 if the link is up after this pass, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Nothing to do until the PCS has sync. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Map the ANEG_CFG_/MR_LP_ADV_ pause bits to
			 * MII-style advertisement words for flow control.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config change events until they stay clear
		 * (up to 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we still have sync and no config
		 * words: accept the link (parallel detection).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4251
/* Top-level link setup for fiber (TBI) ports.
 *
 * Puts the MAC into TBI port mode, initializes a BCM8002 PHY if
 * present, then delegates to the hardware (SG_DIG) or by-hand fiber
 * setup path depending on the HW_AUTONEG flag.  Afterwards it clears
 * latched status bits, updates active speed/duplex and the link LED,
 * and reports carrier/flow-control changes.
 *
 * @force_reset: part of the common tg3_setup_*_phy() signature;
 *               not used on this path.
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember current link parameters so we can report changes. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: without HW autoneg, if we already have carrier and
	 * the status bits show a clean synced link, just ack the change
	 * bits and leave everything alone.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the latched link-change bit in the status block while
	 * keeping SD_STATUS_UPDATED set.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched status bits until they stay clear (up to 100 tries). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Kick a config-word burst to provoke the partner when
		 * autoneg has run out of time.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report link state transitions, or parameter changes while
	 * the carrier state itself did not change.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
4359
/* Link setup for fiber ports whose SERDES is controlled through a
 * MII-style register interface (e.g. 5714S-class parts).
 *
 * Programs the 1000X advertisement and (re)starts autoneg, or forces
 * speed/duplex via BMCR when autoneg is disabled, then derives link
 * state, duplex and flow control from BMSR/ADVERTISE/LPA.  On 5714 the
 * BMSR link bit is overridden by MAC_TX_STATUS since the PHY bit is
 * unreliable there (presumably a hardware erratum — inferred from the
 * override, not documented here).
 *
 * @force_reset: when non-zero, the PHY is reset before setup.
 *
 * Returns the OR of all tg3_readphy() error codes encountered.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any latched status change bits before we start. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice: the link status bit is latched-low. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000X advertisement from link_config. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Only restart autoneg if the advertisement changed or
		 * autoneg was not yet enabled; returns early so the new
		 * negotiation can run before link state is evaluated.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build BMCR with autoneg off and the
		 * requested duplex.
		 */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Advertise nothing and restart autoneg
				 * so the partner drops the link before we
				 * switch to forced mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low link bit: read BMSR twice again. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the common subset of both
			 * sides' advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
4531
/* Periodic SERDES parallel-detection poller (MII-SERDES ports).
 *
 * While autoneg has not produced a link, check the PHY's shadow and
 * expansion registers: with signal detect present and no config code
 * words arriving, force 1000-full with autoneg off (parallel detect).
 * Conversely, if a parallel-detected link later starts receiving
 * config code words, re-enable autoneg.  Does nothing while
 * tp->serdes_counter is still counting down an autoneg attempt.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice; presumably the first read clears a
			 * latched value — TODO confirm against PHY docs.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
4591
/* Common entry point for (re)configuring the link.
 *
 * Dispatches to the fiber, fiber-MII or copper setup routine based on
 * tp->phy_flags, then applies post-setup housekeeping: the 5784 AX
 * clock prescaler, MAC_TX_LENGTHS slot time (longer for 1000/half),
 * statistics coalescing ticks keyed off carrier state, and the ASPM
 * L1 power-management threshold workaround.
 *
 * @force_reset: passed through to the PHY-specific setup routine.
 *
 * Returns the error code from the PHY-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Pick the GRC prescaler to match the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720 keeps extra fields in MAC_TX_LENGTHS; preserve them. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* 1000/half needs the long (0xff) slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only coalesce statistics while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Lower the L1 entry threshold only while link is down. */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
4656
/* Return non-zero while interrupts are being synchronized/disabled
 * (tp->irq_sync set); poll paths use this to back off.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4661
4662 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4663 {
4664         int i;
4665
4666         dst = (u32 *)((u8 *)dst + off);
4667         for (i = 0; i < len; i += sizeof(u32))
4668                 *dst++ = tr32(off + i);
4669 }
4670
/* Fill @regs (a TG3_REG_BLK_SIZE buffer indexed by register offset)
 * with the register ranges that exist on legacy (non-PCIe-mapped)
 * devices.  Ranges whose blocks are not present on all chips are
 * gated on the corresponding tg3 flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* 5705+ parts have no separate TX CPU block. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
4720
/* Dump the chip register block and the per-vector status-block/NAPI state
 * to the kernel log.  Debug aid invoked when a fatal error is detected;
 * uses GFP_ATOMIC so it is safe from atomic context.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line; skip all-zero groups to keep the
	 * log output compact.  The printed offset is in bytes (i * 4).
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	/* Dump the software view of each interrupt vector's state. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
4778
4779 /* This is called whenever we suspect that the system chipset is re-
4780  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4781  * is bogus tx completions. We try to recover by setting the
4782  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4783  * in the workqueue.
4784  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* This path should only be reachable when the reorder workaround
	 * has not already been applied and we are not using indirect
	 * mailbox writes; anything else indicates a driver logic error.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Only flag the recovery here; the actual chip reset happens
	 * later in the workqueue (see comment above this function).
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
4800
4801 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4802 {
4803         /* Tell compiler to fetch tx indices from memory. */
4804         barrier();
4805         return tnapi->tx_pending -
4806                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4807 }
4808
4809 /* Tigon3 never reports partial packet sends.  So we do not
4810  * need special logic to handle SKBs that have not had all
4811  * of their frags sent yet, like SunGEM does.
4812  */
/* Reclaim tx descriptors that the hardware has finished transmitting:
 * unmap their DMA buffers, free the skbs, advance the software consumer
 * index, and wake the tx queue if it was stopped for lack of space.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware's latest completed-descriptor index from the status block. */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	/* With TSS the tx-capable vectors start at napi[1], so shift the
	 * vector number down to get the netdev tx queue index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the hardware reported
		 * a completion we never posted - attempt recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip any extra BDs that were consumed when this mapping
		 * had to be split across multiple descriptors.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Frag slots must be skb-less and we must never walk
			 * past the hardware consumer index.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			/* As above, skip split-mapping continuation BDs. */
			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent queue stop from the transmit path.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
4896
4897 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4898 {
4899         if (!ri->skb)
4900                 return;
4901
4902         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4903                          map_sz, PCI_DMA_FROMDEVICE);
4904         dev_kfree_skb_any(ri->skb);
4905         ri->skb = NULL;
4906 }
4907
4908 /* Returns size of skb allocated or < 0 on error.
4909  *
4910  * We only need to fill in the address because the other members
4911  * of the RX descriptor are invariant, see tg3_init_rings.
4912  *
4913  * Note the purposeful assymetry of cpu vs. chip accesses.  For
4914  * posting buffers we only dirty the first cache line of the RX
4915  * descriptor (containing the address).  Whereas for the RX status
4916  * buffers the cpu only reads the last cacheline of the RX descriptor
4917  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4918  */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Pick the descriptor, ring_info slot, and buffer size for the
	 * requested producer ring (standard or jumbo).
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	/* rx_offset reserves headroom in front of the DMA area
	 * (presumably for receive alignment - set up elsewhere).
	 */
	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Commit point: record the skb and publish its DMA address
	 * into the hardware buffer descriptor.
	 */
	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
4974
4975 /* We only need to move over in the address because the other
4976  * members of the RX descriptor are invariant.  See notes above
4977  * tg3_alloc_rx_skb for full details.
4978  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Buffers always originate from vector 0's producer rings. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	/* Resolve source/destination descriptor and ring_info pairs for
	 * the ring (standard or jumbo) identified by the opaque cookie.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Move the skb pointer, the unmap cookie, and the DMA address
	 * from the source BD slot to the destination BD slot.
	 */
	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
5024
5025 /* The RX ring scheme is composed of multiple rings which post fresh
5026  * buffers to the chip, and one special ring the chip uses to report
5027  * status back to the host.
5028  *
5029  * The special ring reports the status of received packets to the
5030  * host.  The chip does not write into the original descriptor the
5031  * RX buffer was obtained from.  The chip simply takes the original
5032  * descriptor as provided by the host, updates the status and length
5033  * field, then writes this into the next status ring entry.
5034  *
5035  * Each ring the host uses to post buffers to the chip is described
5036  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5037  * it is first placed into the on-chip ram.  When the packet's length
5038  * is known, it walks down the TG3_BDINFO entries to select the ring.
5039  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5040  * which is within the range of the new packet's length is chosen.
5041  *
5042  * The "separate ring for rx status" scheme may sound queer, but it makes
5043  * sense from a cache coherency perspective.  If only the host writes
5044  * to the buffer post rings, and only the chip writes to the rx status
5045  * rings, then cache lines never move beyond shared-modified state.
5046  * If both the host and chip were to write into the same ring, cache line
5047  * eviction could occur since both entities want it in an exclusive state.
5048  */
/* NAPI receive path for one vector: walk the status (return) ring up to
 * @budget packets, hand good frames to the stack, recycle or replace
 * buffers on the producer rings, and update the hardware mailboxes.
 * Returns the number of packets delivered.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring (and
		 * which slot) this packet's buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames (except the tolerated odd-nibble MII
		 * case) and recycle their buffers back onto the ring.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		/* Payload length, excluding the trailing Ethernet FCS. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Large frame: pass the existing buffer up the
			 * stack and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy into a fresh skb and keep the
			 * original DMA buffer on the producer ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			/* Sync the mapping around the CPU copy so we read
			 * coherent data and hand the buffer back cleanly.
			 */
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip computed
		 * one and it verified (0xffff).
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they carry a VLAN tag. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish the standard producer index so the
		 * chip does not starve for buffers within one poll cycle.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] is the vector that pushes producer
		 * updates to the hardware; make sure it runs.
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5231
/* Check the status block for a link-change event and service it.
 * Skipped entirely when link changes are detected via register polling
 * (USE_LINKCHG_REG) or serdes polling (POLL_SERDES) instead.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit while preserving the rest
			 * of the status word.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns link management; just clear
				 * the MAC attention bits.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
5255
/* Transfer posted rx buffers from a source producer ring set @spr to the
 * destination set @dpr, for both the standard and jumbo rings.  Returns 0
 * when everything pending was moved, or -ENOSPC if an occupied destination
 * slot forced a partial transfer (the caller retries later).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Standard ring: move buffers in contiguous runs until the source
	 * is drained or the destination has no room.
	 */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy at most up to the ring wrap point in one pass. */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Truncate the run at the first destination slot that is
		 * still occupied; report the shortfall via -ENOSPC.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Mirror the DMA addresses into the destination BDs. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Jumbo ring: same algorithm as the standard ring above. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5381
/* Do one round of tx-completion and rx work for a vector, within the NAPI
 * budget.  Returns the updated work_done count; returns early if tx
 * recovery has been flagged so the caller can schedule the reset.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	/* With RSS, vector 1 replenishes napi[0]'s hardware producer rings
	 * from every other vector's per-vector rings.
	 */
	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* BD updates must be visible before the mailbox writes
		 * tell the chip about the new producer indices.
		 */
		wmb();

		/* Only ring the doorbells whose index actually advanced. */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer came up short (-ENOSPC); kick the coalescing
		 * engine so the retry happens soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5428
/* NAPI poll handler for MSI-X vectors (other than vector 0).  Loops over
 * tg3_poll_work() until the budget is exhausted or no work remains, then
 * completes NAPI and re-enables the vector's interrupt via its mailbox.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tnapi->last_tag is written to the interrupt mailbox below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	/* Leave interrupts off and let the reset task restart the device. */
	schedule_work(&tp->reset_task);
	return work_done;
}
5472
5473 static void tg3_process_error(struct tg3 *tp)
5474 {
5475         u32 val;
5476         bool real_error = false;
5477
5478         if (tg3_flag(tp, ERROR_PROCESSED))
5479                 return;
5480
5481         /* Check Flow Attention register */
5482         val = tr32(HOSTCC_FLOW_ATTN);
5483         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5484                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5485                 real_error = true;
5486         }
5487
5488         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5489                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5490                 real_error = true;
5491         }
5492
5493         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5494                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5495                 real_error = true;
5496         }
5497
5498         if (!real_error)
5499                 return;
5500
5501         tg3_dump_state(tp);
5502
5503         tg3_flag_set(tp, ERROR_PROCESSED);
5504         schedule_work(&tp->reset_task);
5505 }
5506
/* NAPI poll handler for vector 0 (and the only poll handler when
 * MSI-X is not in use).  Besides RX/TX work it also processes link
 * change and hardware error indications carried in the status block.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5554
5555 static void tg3_napi_disable(struct tg3 *tp)
5556 {
5557         int i;
5558
5559         for (i = tp->irq_cnt - 1; i >= 0; i--)
5560                 napi_disable(&tp->napi[i].napi);
5561 }
5562
5563 static void tg3_napi_enable(struct tg3 *tp)
5564 {
5565         int i;
5566
5567         for (i = 0; i < tp->irq_cnt; i++)
5568                 napi_enable(&tp->napi[i].napi);
5569 }
5570
5571 static void tg3_napi_init(struct tg3 *tp)
5572 {
5573         int i;
5574
5575         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5576         for (i = 1; i < tp->irq_cnt; i++)
5577                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5578 }
5579
5580 static void tg3_napi_fini(struct tg3 *tp)
5581 {
5582         int i;
5583
5584         for (i = 0; i < tp->irq_cnt; i++)
5585                 netif_napi_del(&tp->napi[i].napi);
5586 }
5587
/* Halt all network activity: stop NAPI polling and disable the TX
 * queues.  Counterpart of tg3_netif_start().
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
5594
/* Resume network activity after tg3_netif_stop(): wake the TX queues,
 * re-enable NAPI and unmask chip interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so the next interrupt/poll
	 * pass sees pending work and does not get skipped.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
5607
/* Quiesce interrupt processing: set tp->irq_sync so the interrupt
 * handlers refuse new work (they check it via tg3_irq_sync()), then
 * wait for any handler already running on another CPU to finish.
 * irq_sync is cleared elsewhere (e.g. in tg3_restart_hw()).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make the irq_sync store visible to the handlers before we
	 * start waiting on them.
	 */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
5620
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 * Pairs with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
5632
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
5637
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache with the data the poll loop touches first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Do not schedule NAPI while the device is quiesced
	 * (see tg3_irq_quiesce()).
	 */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
5655
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache with the data the poll loop touches first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Do not schedule NAPI while the device is quiesced
	 * (see tg3_irq_quiesce()).
	 */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
5681
/* Legacy INTx ISR for the non-tagged status block scheme. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
5730
/* Legacy INTx ISR for chips using the tagged status block scheme. */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
5782
5783 /* ISR for interrupt test */
5784 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5785 {
5786         struct tg3_napi *tnapi = dev_id;
5787         struct tg3 *tp = tnapi->tp;
5788         struct tg3_hw_status *sblk = tnapi->hw_status;
5789
5790         if ((sblk->status & SD_STATUS_UPDATED) ||
5791             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5792                 tg3_disable_ints(tp);
5793                 return IRQ_RETVAL(1);
5794         }
5795         return IRQ_RETVAL(0);
5796 }
5797
5798 static int tg3_init_hw(struct tg3 *, int);
5799 static int tg3_halt(struct tg3 *, int, int);
5800
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  Returns 0 on success; on failure the
 * device is shut down and closed, and the error is returned.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		/* Init failed: halt the chip and close the device.
		 * dev_close() must run without tp->lock held, hence
		 * the unlock/relock around the teardown.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
5824
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: run the INTx interrupt handler by hand for
 * every vector so netconsole and friends can make progress.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < tp->irq_cnt; vec++)
		tg3_interrupt(tp->napi[vec].irq_vec, &tp->napi[vec]);
}
#endif
5835
/* Workqueue handler performing a full chip reset and re-init.
 * Scheduled from error paths (TX timeout, error interrupts, poll
 * handlers); runs in process context.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* The device may have been closed between scheduling and now. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	/* irq_sync=1: also wait out any in-flight interrupt handlers. */
	tg3_full_lock(tp, 1);

	restart_timer = tg3_flag(tp, RESTART_TIMER);
	tg3_flag_clear(tp, RESTART_TIMER);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* TX hang recovery: switch the mailbox write methods —
		 * presumably to avoid posted-write reordering; confirm
		 * against the relevant chip errata.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
5883
/* net_device watchdog callback: the stack detected a stalled TX
 * queue.  Optionally log chip state, then defer a full reset to the
 * reset workqueue.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	schedule_work(&tp->reset_task);
}
5895
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	/* The second term is the real test: it is true iff
	 * base + len + 8 wraps past a 4GB boundary.  The first
	 * comparison is a cheap pre-filter that only passes for
	 * addresses close enough to the boundary for the sum to
	 * wrap (0xffffdcc0 leaves ~9KB of headroom — presumably
	 * sized for a max jumbo frame; TODO confirm).
	 */
	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
5903
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only a concern on 64-bit platforms where DMA mappings can
	 * actually exceed 40 bits, and only for affected chips.
	 */
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
5916
5917 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
5918                                  dma_addr_t mapping, u32 len, u32 flags,
5919                                  u32 mss, u32 vlan)
5920 {
5921         txbd->addr_hi = ((u64) mapping >> 32);
5922         txbd->addr_lo = ((u64) mapping & 0xffffffff);
5923         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5924         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
5925 }
5926
5927 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 entry,
5928                             dma_addr_t map, u32 len, u32 flags,
5929                             u32 mss, u32 vlan)
5930 {
5931         struct tg3 *tp = tnapi->tp;
5932         bool hwbug = false;
5933
5934         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
5935                 hwbug = 1;
5936
5937         if (tg3_4g_overflow_test(map, len))
5938                 hwbug = 1;
5939
5940         if (tg3_40bit_overflow_test(tp, map, len))
5941                 hwbug = 1;
5942
5943         tg3_tx_set_bd(&tnapi->tx_ring[entry], map, len, flags, mss, vlan);
5944
5945         return hwbug;
5946 }
5947
/* Unmap the skb occupying TX ring slot |entry| and clear its slot.
 * |last| is the number of trailing page fragments to unmap.  Slots
 * with the 'fragmented' flag set are skipped over without unmapping —
 * NOTE(review): they appear to be extra BDs consumed by a workaround
 * path; confirm where 'fragmented' is set.  The skb itself is NOT
 * freed here.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* Unmap the linear (head) portion of the skb. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	/* Then each page fragment, skipping 'fragmented' filler slots. */
	for (i = 0; i < last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       frag->size, PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
5985
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize |skb| into a freshly allocated copy whose single mapping
 * avoids the problematic address ranges, and queue that copy at
 * tnapi->tx_prod instead.  The original skb is always freed.
 * Returns 0 on success, -1 on allocation/mapping failure (in which
 * case the packet is dropped).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff *skb,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = tnapi->tx_prod;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* Expand headroom so the copied data can start past the
		 * current misalignment — presumably a 5701 4-byte
		 * alignment restriction; TODO confirm against errata.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry],
					   mapping, new_addr);

			/* If even the linear copy trips a DMA bug,
			 * give up and drop the packet.
			 */
			if (tg3_tx_frag_set(tnapi, entry, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, entry, 0);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);

	return ret;
}
6038
6039 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6040
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software, then transmit each resulting skb
	 * normally; the per-segment headers are short enough to
	 * avoid the hardware bug.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
6081
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With TSS, TX rings start at vector 1, not vector 0. */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		/* We will modify headers below; get a private copy
		 * if the header area is cloned.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		if (skb_is_gso_v6(skb)) {
			hdr_len = skb_headlen(skb) - ETH_HLEN;
		} else {
			u32 ip_tcp_len;

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
			hdr_len = ip_tcp_len + tcp_opt_len;

			/* Clear the checksum and seed tot_len with the
			 * per-segment length for the TSO engine.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Headers longer than 80 bytes trip a TSO bug on some
		 * chips; fall back to software GSO there.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the header length into mss/base_flags in the
		 * layout each TSO hardware generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

#ifdef BCM_KERNEL_SUPPORTS_8021Q
	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}
#endif

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	len = skb_headlen(skb);

	/* Map the linear part of the skb for DMA. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, entry, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan))
		would_hit_hwbug = 1;

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		/* Only the hardware TSO variants take mss on every BD;
		 * otherwise clear it for the fragment BDs.
		 */
		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;

			if (tg3_tx_frag_set(tnapi, entry, mapping, len,
				  base_flags | ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan))
				would_hit_hwbug = 1;

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		/* Undo what we queued and retry with a linearized copy
		 * via the DMA-bug workaround.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags,
						mss, vlan))
			goto out_unlock;

		entry = NEXT_TX(tnapi->tx_prod);
	}

	skb_tx_timestamp(skb);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;

dma_error:
	/* A fragment mapping failed: unmap the head and the fragments
	 * already mapped (0..i-1), then drop the packet.
	 */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
	dev_kfree_skb(skb);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
	return NETDEV_TX_OK;
}
6301
/* Enable or disable internal MAC loopback according to the
 * NETIF_F_LOOPBACK bit in |features|.  No-op if the requested state
 * is already in effect.
 */
static void tg3_set_loopback(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		/*
		 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
		 * loopback mode if Half-Duplex mode was negotiated earlier.
		 */
		tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;

		/* Enable internal MAC loopback mode */
		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		spin_lock_bh(&tp->lock);
		tw32(MAC_MODE, tp->mac_mode);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		/* Disable internal MAC loopback mode */
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
		spin_lock_bh(&tp->lock);
		tw32(MAC_MODE, tp->mac_mode);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
6337
6338 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6339 {
6340         struct tg3 *tp = netdev_priv(dev);
6341
6342         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6343                 features &= ~NETIF_F_ALL_TSO;
6344
6345         return features;
6346 }
6347
6348 static int tg3_set_features(struct net_device *dev, u32 features)
6349 {
6350         u32 changed = dev->features ^ features;
6351
6352         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6353                 tg3_set_loopback(dev, features);
6354
6355         return 0;
6356 }
6357
6358 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6359                                int new_mtu)
6360 {
6361         dev->mtu = new_mtu;
6362
6363         if (new_mtu > ETH_DATA_LEN) {
6364                 if (tg3_flag(tp, 5780_CLASS)) {
6365                         netdev_update_features(dev);
6366                         tg3_flag_clear(tp, TSO_CAPABLE);
6367                 } else {
6368                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6369                 }
6370         } else {
6371                 if (tg3_flag(tp, 5780_CLASS)) {
6372                         tg3_flag_set(tp, TSO_CAPABLE);
6373                         netdev_update_features(dev);
6374                 }
6375                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6376         }
6377 }
6378
/* ndo_change_mtu hook.  Validates the requested MTU and, if the device
 * is running, quiesces the PHY and netif, halts the chip, applies the
 * new MTU (which may toggle jumbo/TSO flags) and restarts the
 * hardware.  Returns 0, -EINVAL for an out-of-range MTU, or the error
 * from tg3_restart_hw().
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Full halt: the rx rings must be rebuilt for the new MTU. */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* Restart the PHY outside the full lock, as in tg3_open(). */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
6417
/* Free the rx buffers held in one producer ring set.
 *
 * For a secondary (per-vector) set, only the window between the
 * consumer and producer indices holds buffers, so just that span is
 * walked, wrapping with the ring mask.  For the primary set
 * (&tp->napi[0].prodring) every slot is scanned.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		/* Walk cons..prod with wraparound via the ring mask. */
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Primary ring set: scan every slot. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	/* 5780-class parts have no dedicated jumbo ring. */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
6451
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success.  If even the first buffer of a ring cannot be
 * allocated, everything is freed and -ENOMEM is returned; a partial
 * allocation merely shrinks tp->rx_pending / tp->rx_jumbo_pending.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Secondary (per-vector) sets only clear their shadow buffer
	 * arrays; the descriptor rings belong to the primary set.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class parts post jumbo-sized buffers on the standard
	 * ring when a jumbo MTU is configured.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	/* No dedicated jumbo ring on non-jumbo or 5780-class parts. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
6554
6555 static void tg3_rx_prodring_fini(struct tg3 *tp,
6556                                  struct tg3_rx_prodring_set *tpr)
6557 {
6558         kfree(tpr->rx_std_buffers);
6559         tpr->rx_std_buffers = NULL;
6560         kfree(tpr->rx_jmb_buffers);
6561         tpr->rx_jmb_buffers = NULL;
6562         if (tpr->rx_std) {
6563                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6564                                   tpr->rx_std, tpr->rx_std_mapping);
6565                 tpr->rx_std = NULL;
6566         }
6567         if (tpr->rx_jmb) {
6568                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6569                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6570                 tpr->rx_jmb = NULL;
6571         }
6572 }
6573
/* Allocate the shadow buffer arrays and DMA descriptor rings for one
 * rx producer ring set.  Jumbo resources are allocated only on parts
 * with a dedicated jumbo ring (jumbo-capable and not 5780-class).
 * Returns 0 or -ENOMEM; on failure anything already allocated is
 * released via tg3_rx_prodring_fini().
 */
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	/* Fini handles partially allocated sets and NULLs pointers. */
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
6609
6610 /* Free up pending packets in all rx/tx rings.
6611  *
6612  * The chip has been shut down and the driver detached from
6613  * the networking, so no interrupts or new tx packets will
6614  * end up in the driver.  tp->{tx,}lock is not held and we are not
6615  * in an interrupt context and thus may sleep.
6616  */
6617 static void tg3_free_rings(struct tg3 *tp)
6618 {
6619         int i, j;
6620
6621         for (j = 0; j < tp->irq_cnt; j++) {
6622                 struct tg3_napi *tnapi = &tp->napi[j];
6623
6624                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6625
6626                 if (!tnapi->tx_buffers)
6627                         continue;
6628
6629                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
6630                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
6631
6632                         if (!skb)
6633                                 continue;
6634
6635                         tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
6636
6637                         dev_kfree_skb_any(skb);
6638                 }
6639         }
6640 }
6641
6642 /* Initialize tx/rx rings for packet processing.
6643  *
6644  * The chip has been shut down and the driver detached from
6645  * the networking, so no interrupts or new tx packets will
6646  * end up in the driver.  tp->{tx,}lock are held and thus
6647  * we may not sleep.
6648  */
6649 static int tg3_init_rings(struct tg3 *tp)
6650 {
6651         int i;
6652
6653         /* Free up all the SKBs. */
6654         tg3_free_rings(tp);
6655
6656         for (i = 0; i < tp->irq_cnt; i++) {
6657                 struct tg3_napi *tnapi = &tp->napi[i];
6658
6659                 tnapi->last_tag = 0;
6660                 tnapi->last_irq_tag = 0;
6661                 tnapi->hw_status->status = 0;
6662                 tnapi->hw_status->status_tag = 0;
6663                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6664
6665                 tnapi->tx_prod = 0;
6666                 tnapi->tx_cons = 0;
6667                 if (tnapi->tx_ring)
6668                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6669
6670                 tnapi->rx_rcb_ptr = 0;
6671                 if (tnapi->rx_rcb)
6672                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6673
6674                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6675                         tg3_free_rings(tp);
6676                         return -ENOMEM;
6677                 }
6678         }
6679
6680         return 0;
6681 }
6682
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases everything tg3_alloc_consistent() set up: per-vector tx
 * rings and shadow arrays, rx return (RCB) rings, rx producer ring
 * sets and status blocks, plus the shared statistics block.  Each
 * pointer is NULLed after being freed, so this is safe to call on a
 * partially allocated device.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
6727
6728 /*
6729  * Must not be invoked with interrupt sources disabled and
6730  * the hardware shutdown down.  Can sleep.
6731  */
6732 static int tg3_alloc_consistent(struct tg3 *tp)
6733 {
6734         int i;
6735
6736         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6737                                           sizeof(struct tg3_hw_stats),
6738                                           &tp->stats_mapping,
6739                                           GFP_KERNEL);
6740         if (!tp->hw_stats)
6741                 goto err_out;
6742
6743         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6744
6745         for (i = 0; i < tp->irq_cnt; i++) {
6746                 struct tg3_napi *tnapi = &tp->napi[i];
6747                 struct tg3_hw_status *sblk;
6748
6749                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6750                                                       TG3_HW_STATUS_SIZE,
6751                                                       &tnapi->status_mapping,
6752                                                       GFP_KERNEL);
6753                 if (!tnapi->hw_status)
6754                         goto err_out;
6755
6756                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6757                 sblk = tnapi->hw_status;
6758
6759                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6760                         goto err_out;
6761
6762                 /* If multivector TSS is enabled, vector 0 does not handle
6763                  * tx interrupts.  Don't allocate any resources for it.
6764                  */
6765                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6766                     (i && tg3_flag(tp, ENABLE_TSS))) {
6767                         tnapi->tx_buffers = kzalloc(
6768                                                sizeof(struct tg3_tx_ring_info) *
6769                                                TG3_TX_RING_SIZE, GFP_KERNEL);
6770                         if (!tnapi->tx_buffers)
6771                                 goto err_out;
6772
6773                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6774                                                             TG3_TX_RING_BYTES,
6775                                                         &tnapi->tx_desc_mapping,
6776                                                             GFP_KERNEL);
6777                         if (!tnapi->tx_ring)
6778                                 goto err_out;
6779                 }
6780
6781                 /*
6782                  * When RSS is enabled, the status block format changes
6783                  * slightly.  The "rx_jumbo_consumer", "reserved",
6784                  * and "rx_mini_consumer" members get mapped to the
6785                  * other three rx return ring producer indexes.
6786                  */
6787                 switch (i) {
6788                 default:
6789                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6790                         break;
6791                 case 2:
6792                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6793                         break;
6794                 case 3:
6795                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6796                         break;
6797                 case 4:
6798                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6799                         break;
6800                 }
6801
6802                 /*
6803                  * If multivector RSS is enabled, vector 0 does not handle
6804                  * rx or tx interrupts.  Don't allocate any resources for it.
6805                  */
6806                 if (!i && tg3_flag(tp, ENABLE_RSS))
6807                         continue;
6808
6809                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6810                                                    TG3_RX_RCB_RING_BYTES(tp),
6811                                                    &tnapi->rx_rcb_mapping,
6812                                                    GFP_KERNEL);
6813                 if (!tnapi->rx_rcb)
6814                         goto err_out;
6815
6816                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6817         }
6818
6819         return 0;
6820
6821 err_out:
6822         tg3_free_consistent(tp);
6823         return -ENOMEM;
6824 }
6825
6826 #define MAX_WAIT_CNT 1000
6827
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * @ofs:        offset of the block's mode register.
 * @enable_bit: enable bit to clear and then poll on.
 * @silent:     suppress the timeout message and error return.
 *
 * Polls for up to MAX_WAIT_CNT * 100us.  Returns 0 on success or
 * -ENODEV on timeout; note that a timeout with @silent set still
 * returns 0.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit with a flushing write (tw32_f) so the
	 * disable reaches the chip before polling starts.
	 */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
6873
/* tp->lock is held. */
/* Shut down all hardware activity in an orderly fashion: disable
 * interrupts, stop the rx path, then the tx/send path, the MAC
 * engines, and finally the buffer manager and memory arbiter.
 * Individual block failures (-ENODEV) are OR-ed into err; since
 * -ENODEV is the only error code produced, OR-ing preserves it.
 * The status and statistics blocks are cleared at the end.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new rx frames at the MAC first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Rx path blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Tx/send path blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and poll for it to quiesce. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear all status and statistics blocks. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
6939
/* Post an event to the APE (management processor) firmware.
 *
 * Silently returns if the firmware is NCSI-based, the APE signature is
 * missing, or the firmware is not ready.  Waits up to ~1ms for any
 * previously posted event to be consumed, then posts the new event and
 * rings the APE doorbell.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Post the new event (while holding the APE memory lock)
		 * as soon as the previous one is no longer pending.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if the event was actually posted. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
6979
/* Inform the APE management firmware of a driver state transition
 * (init, shutdown or suspend) so it can adjust its behavior, e.g.
 * assume OS-absent state or arm Wake-on-LAN handling.  No-op when no
 * APE is present; unknown kinds are ignored.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment signature, driver id and
		 * behavior flags, and bump the init counter.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
7036
7037 /* tp->lock is held. */
7038 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7039 {
7040         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7041                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7042
7043         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7044                 switch (kind) {
7045                 case RESET_KIND_INIT:
7046                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7047                                       DRV_STATE_START);
7048                         break;
7049
7050                 case RESET_KIND_SHUTDOWN:
7051                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7052                                       DRV_STATE_UNLOAD);
7053                         break;
7054
7055                 case RESET_KIND_SUSPEND:
7056                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7057                                       DRV_STATE_SUSPEND);
7058                         break;
7059
7060                 default:
7061                         break;
7062                 }
7063         }
7064
7065         if (kind == RESET_KIND_INIT ||
7066             kind == RESET_KIND_SUSPEND)
7067                 tg3_ape_driver_state_change(tp, kind);
7068 }
7069
7070 /* tp->lock is held. */
7071 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7072 {
7073         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7074                 switch (kind) {
7075                 case RESET_KIND_INIT:
7076                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7077                                       DRV_STATE_START_DONE);
7078                         break;
7079
7080                 case RESET_KIND_SHUTDOWN:
7081                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7082                                       DRV_STATE_UNLOAD_DONE);
7083                         break;
7084
7085                 default:
7086                         break;
7087                 }
7088         }
7089
7090         if (kind == RESET_KIND_SHUTDOWN)
7091                 tg3_ape_driver_state_change(tp, kind);
7092 }
7093
7094 /* tp->lock is held. */
7095 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7096 {
7097         if (tg3_flag(tp, ENABLE_ASF)) {
7098                 switch (kind) {
7099                 case RESET_KIND_INIT:
7100                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7101                                       DRV_STATE_START);
7102                         break;
7103
7104                 case RESET_KIND_SHUTDOWN:
7105                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7106                                       DRV_STATE_UNLOAD);
7107                         break;
7108
7109                 case RESET_KIND_SUSPEND:
7110                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7111                                       DRV_STATE_SUSPEND);
7112                         break;
7113
7114                 default:
7115                         break;
7116                 }
7117         }
7118 }
7119
/* Poll until the on-chip boot firmware reports that it has finished
 * initializing.  Returns 0 on success (or when the part appears to have
 * no firmware fitted); returns -ENODEV only if a 5906's VCPU never sets
 * its init-done bit.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete.
         * The firmware writes the complemented magic value into the
         * mailbox when it is done (up to 1s: 100000 x 10us).
         */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
                tg3_flag_set(tp, NO_FWARE_REPORTED);

                netdev_info(tp->dev, "No firmware running\n");
        }

        if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
                /* The 57765 A0 needs a little more
                 * time to do some important work.
                 */
                mdelay(10);
        }

        return 0;
}
7163
/* Save PCI command register before chip reset.  The saved value is
 * written back by tg3_restore_pci_state() after GRC_MISC_CFG core clock
 * reset clears it.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7169
/* Restore PCI state after chip reset.  The writes below re-establish
 * configuration-space settings the core clock reset wiped out; the
 * order mirrors the driver's original bring-up sequence and should not
 * be rearranged.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tg3_flag(tp, ENABLE_APE))
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR |
                       PCISTATE_ALLOW_APE_PSPACE_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Write back the command register saved by tg3_save_pci_state(). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
                if (tg3_flag(tp, PCI_EXPRESS))
                        pcie_set_readrq(tp->pdev, tp->pcie_readrq);
                else {
                        /* Conventional PCI: restore cache line size and
                         * latency timer, which the reset also clears.
                         */
                        pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                              tp->pci_cacheline_sz);
                        pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                              tp->pci_lat_timer);
                }
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tg3_flag(tp, 5780_CLASS)) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tg3_flag(tp, USING_MSI)) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
7234
7235 static void tg3_stop_fw(struct tg3 *);
7236
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip, then bring the basic
 * register/PCI/firmware interfaces back to a usable state.  The statement
 * order below is load-bearing: it works around numerous chip-revision
 * erratas, so do not reorder without consulting the per-ASIC notes.
 * Returns 0 on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int i, err;

        tg3_nvram_lock(tp);

        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            tg3_flag(tp, 5755_PLUS))
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tg3_flag_set(tp, CHIP_RESETTING);
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status) {
                        tnapi->hw_status->status = 0;
                        tnapi->hw_status->status_tag = 0;
                }
                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;
        }
        smp_mb();

        /* Wait for any in-flight interrupt handlers to observe the
         * cleared status blocks before resetting.
         */
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Force PCIe 1.0a mode */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS) &&
                    tr32(TG3_PCIE_PHY_TSTCTL) ==
                    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
                        tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        /* Manage gphy power for all CPMU absent PCIe devices. */
        if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
                u16 val16;

                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }

                /* Clear the "no snoop" and "relaxed ordering" bits. */
                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
                                     &val16);
                val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
                           PCI_EXP_DEVCTL_NOSNOOP_EN);
                /*
                 * Older PCIe devices only support the 128 byte
                 * MPS setting.  Enforce the restriction.
                 */
                if (!tg3_flag(tp, CPMU_PRESENT))
                        val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
                pci_write_config_word(tp->pdev,
                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
                                      val16);

                pcie_set_readrq(tp->pdev, tp->pcie_readrq);

                /* Clear error status */
                pci_write_config_word(tp->pdev,
                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
                                      PCI_EXP_DEVSTA_CED |
                                      PCI_EXP_DEVSTA_NFED |
                                      PCI_EXP_DEVSTA_FED |
                                      PCI_EXP_DEVSTA_URD);
        }

        tg3_restore_pci_state(tp);

        tg3_flag_clear(tp, CHIP_RESETTING);
        tg3_flag_clear(tp, ERROR_PROCESSED);

        /* Re-enable the memory arbiter (5780-class parts preserve the
         * other mode bits).
         */
        val = 0;
        if (tg3_flag(tp, 5780_CLASS))
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Reprogram the MAC port mode for SERDES PHYs; other PHY types
         * leave MAC_MODE zeroed here.
         */
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                val = tp->mac_mode;
        } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                val = tp->mac_mode;
        } else
                val = 0;

        tw32_f(MAC_MODE, val);
        udelay(40);

        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        tg3_mdio_start(tp);

        if (tg3_flag(tp, PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
            !tg3_flag(tp, 57765_PLUS)) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                val = tr32(TG3_CPMU_CLCK_ORIDE);
                tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
        }

        /* Reprobe ASF enable state.  */
        tg3_flag_clear(tp, ENABLE_ASF);
        tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        tp->last_event_jiffies = jiffies;
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }
        }

        return 0;
}
7482
7483 /* tp->lock is held. */
7484 static void tg3_stop_fw(struct tg3 *tp)
7485 {
7486         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7487                 /* Wait for RX cpu to ACK the previous event. */
7488                 tg3_wait_for_event_ack(tp);
7489
7490                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7491
7492                 tg3_generate_fw_event(tp);
7493
7494                 /* Wait for RX cpu to ACK this event. */
7495                 tg3_wait_for_event_ack(tp);
7496         }
7497 }
7498
/* tp->lock is held.
 *
 * Stop the firmware, signal the reset kind, abort pending hardware
 * activity and reset the chip.  The MAC address and post-reset
 * signatures are always written, even if the chip reset failed, so the
 * firmware state stays consistent.  Returns the tg3_chip_reset() result.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int err;

        tg3_stop_fw(tp);

        tg3_write_sig_pre_reset(tp, kind);

        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);

        __tg3_set_mac_addr(tp, 0);

        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        /* err was 0 on success; no need for a separate success return. */
        return err;
}
7521
7522 #define RX_CPU_SCRATCH_BASE     0x30000
7523 #define RX_CPU_SCRATCH_SIZE     0x04000
7524 #define TX_CPU_SCRATCH_BASE     0x34000
7525 #define TX_CPU_SCRATCH_SIZE     0x04000
7526
7527 /* tp->lock is held. */
7528 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7529 {
7530         int i;
7531
7532         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7533
7534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7535                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7536
7537                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7538                 return 0;
7539         }
7540         if (offset == RX_CPU_BASE) {
7541                 for (i = 0; i < 10000; i++) {
7542                         tw32(offset + CPU_STATE, 0xffffffff);
7543                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7544                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7545                                 break;
7546                 }
7547
7548                 tw32(offset + CPU_STATE, 0xffffffff);
7549                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7550                 udelay(10);
7551         } else {
7552                 for (i = 0; i < 10000; i++) {
7553                         tw32(offset + CPU_STATE, 0xffffffff);
7554                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7555                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7556                                 break;
7557                 }
7558         }
7559
7560         if (i >= 10000) {
7561                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7562                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7563                 return -ENODEV;
7564         }
7565
7566         /* Clear firmware's nvram arbitration. */
7567         if (tg3_flag(tp, NVRAM))
7568                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7569         return 0;
7570 }
7571
/* Descriptor for a firmware image to be loaded into an on-chip CPU's
 * scratch memory by tg3_load_firmware_cpu().
 */
struct fw_info {
        unsigned int fw_base;   /* load/start address; low 16 bits offset the scratch base */
        unsigned int fw_len;    /* image length in bytes */
        const __be32 *fw_data;  /* image payload, big-endian 32-bit words */
};
7577
7578 /* tp->lock is held. */
7579 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7580                                  int cpu_scratch_size, struct fw_info *info)
7581 {
7582         int err, lock_err, i;
7583         void (*write_op)(struct tg3 *, u32, u32);
7584
7585         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7586                 netdev_err(tp->dev,
7587                            "%s: Trying to load TX cpu firmware which is 5705\n",
7588                            __func__);
7589                 return -EINVAL;
7590         }
7591
7592         if (tg3_flag(tp, 5705_PLUS))
7593                 write_op = tg3_write_mem;
7594         else
7595                 write_op = tg3_write_indirect_reg32;
7596
7597         /* It is possible that bootcode is still loading at this point.
7598          * Get the nvram lock first before halting the cpu.
7599          */
7600         lock_err = tg3_nvram_lock(tp);
7601         err = tg3_halt_cpu(tp, cpu_base);
7602         if (!lock_err)
7603                 tg3_nvram_unlock(tp);
7604         if (err)
7605                 goto out;
7606
7607         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7608                 write_op(tp, cpu_scratch_base + i, 0);
7609         tw32(cpu_base + CPU_STATE, 0xffffffff);
7610         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7611         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7612                 write_op(tp, (cpu_scratch_base +
7613                               (info->fw_base & 0xffff) +
7614                               (i * sizeof(u32))),
7615                               be32_to_cpu(info->fw_data[i]));
7616
7617         err = 0;
7618
7619 out:
7620         return err;
7621 }
7622
7623 /* tp->lock is held. */
7624 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7625 {
7626         struct fw_info info;
7627         const __be32 *fw_data;
7628         int err, i;
7629
7630         fw_data = (void *)tp->fw->data;
7631
7632         /* Firmware blob starts with version numbers, followed by
7633            start address and length. We are setting complete length.
7634            length = end_address_of_bss - start_address_of_text.
7635            Remainder is the blob to be loaded contiguously
7636            from start address. */
7637
7638         info.fw_base = be32_to_cpu(fw_data[1]);
7639         info.fw_len = tp->fw->size - 12;
7640         info.fw_data = &fw_data[3];
7641
7642         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7643                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7644                                     &info);
7645         if (err)
7646                 return err;
7647
7648         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7649                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7650                                     &info);
7651         if (err)
7652                 return err;
7653
7654         /* Now startup only the RX cpu. */
7655         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7656         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7657
7658         for (i = 0; i < 5; i++) {
7659                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7660                         break;
7661                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7662                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7663                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7664                 udelay(1000);
7665         }
7666         if (i >= 5) {
7667                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7668                            "should be %08x\n", __func__,
7669                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7670                 return -ENODEV;
7671         }
7672         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7673         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7674
7675         return 0;
7676 }
7677
/* tp->lock is held.
 *
 * Load the software TSO firmware and start the CPU that runs it.
 * Chips with hardware TSO (HW_TSO_1/2/3) need no firmware and return 0
 * immediately.  Returns 0 on success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        struct fw_info info;
        const __be32 *fw_data;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;

        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3))
                return 0;

        fw_data = (void *)tp->fw->data;

        /* Firmware blob starts with version numbers, followed by
           start address and length. We are setting complete length.
           length = end_address_of_bss - start_address_of_text.
           Remainder is the blob to be loaded contiguously
           from start address. */

        info.fw_base = be32_to_cpu(fw_data[1]);
        cpu_scratch_size = tp->fw_len;
        info.fw_len = tp->fw->size - 12;
        info.fw_data = &fw_data[3];

        /* On 5705 the TSO firmware runs on the RX CPU out of the mbuf
         * pool; everywhere else it runs on the TX CPU scratch memory.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
        } else {
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    &info);
        if (err)
                return err;

        /* Now startup the cpu: set its PC and retry until the value
         * sticks.
         */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC, info.fw_base);

        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == info.fw_base)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC, info.fw_base);
                udelay(1000);
        }
        if (i >= 5) {
                netdev_err(tp->dev,
                           "%s fails to set CPU PC, is %08x should be %08x\n",
                           __func__, tr32(cpu_base + CPU_PC), info.fw_base);
                return -ENODEV;
        }
        /* Release the CPU from halt. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
        return 0;
}
7741
7742
7743 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7744 {
7745         struct tg3 *tp = netdev_priv(dev);
7746         struct sockaddr *addr = p;
7747         int err = 0, skip_mac_1 = 0;
7748
7749         if (!is_valid_ether_addr(addr->sa_data))
7750                 return -EINVAL;
7751
7752         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7753
7754         if (!netif_running(dev))
7755                 return 0;
7756
7757         if (tg3_flag(tp, ENABLE_ASF)) {
7758                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7759
7760                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7761                 addr0_low = tr32(MAC_ADDR_0_LOW);
7762                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7763                 addr1_low = tr32(MAC_ADDR_1_LOW);
7764
7765                 /* Skip MAC addr 1 if ASF is using it. */
7766                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7767                     !(addr1_high == 0 && addr1_low == 0))
7768                         skip_mac_1 = 1;
7769         }
7770         spin_lock_bh(&tp->lock);
7771         __tg3_set_mac_addr(tp, skip_mac_1);
7772         spin_unlock_bh(&tp->lock);
7773
7774         return err;
7775 }
7776
7777 /* tp->lock is held. */
7778 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7779                            dma_addr_t mapping, u32 maxlen_flags,
7780                            u32 nic_addr)
7781 {
7782         tg3_write_mem(tp,
7783                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7784                       ((u64) mapping >> 32));
7785         tg3_write_mem(tp,
7786                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7787                       ((u64) mapping & 0xffffffff));
7788         tg3_write_mem(tp,
7789                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7790                        maxlen_flags);
7791
7792         if (!tg3_flag(tp, 5705_PLUS))
7793                 tg3_write_mem(tp,
7794                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7795                               nic_addr);
7796 }
7797
7798 static void __tg3_set_rx_mode(struct net_device *);
7799 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7800 {
7801         int i;
7802
7803         if (!tg3_flag(tp, ENABLE_TSS)) {
7804                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7805                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7806                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7807         } else {
7808                 tw32(HOSTCC_TXCOL_TICKS, 0);
7809                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7810                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7811         }
7812
7813         if (!tg3_flag(tp, ENABLE_RSS)) {
7814                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7815                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7816                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7817         } else {
7818                 tw32(HOSTCC_RXCOL_TICKS, 0);
7819                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7820                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7821         }
7822
7823         if (!tg3_flag(tp, 5705_PLUS)) {
7824                 u32 val = ec->stats_block_coalesce_usecs;
7825
7826                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7827                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7828
7829                 if (!netif_carrier_ok(tp->dev))
7830                         val = 0;
7831
7832                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7833         }
7834
7835         for (i = 0; i < tp->irq_cnt - 1; i++) {
7836                 u32 reg;
7837
7838                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7839                 tw32(reg, ec->rx_coalesce_usecs);
7840                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7841                 tw32(reg, ec->rx_max_coalesced_frames);
7842                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7843                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7844
7845                 if (tg3_flag(tp, ENABLE_TSS)) {
7846                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7847                         tw32(reg, ec->tx_coalesce_usecs);
7848                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7849                         tw32(reg, ec->tx_max_coalesced_frames);
7850                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7851                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7852                 }
7853         }
7854
7855         for (; i < tp->irq_max - 1; i++) {
7856                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7857                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7858                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7859
7860                 if (tg3_flag(tp, ENABLE_TSS)) {
7861                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7862                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7863                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7864                 }
7865         }
7866 }
7867
7868 /* tp->lock is held. */
7869 static void tg3_rings_reset(struct tg3 *tp)
7870 {
7871         int i;
7872         u32 stblk, txrcb, rxrcb, limit;
7873         struct tg3_napi *tnapi = &tp->napi[0];
7874
7875         /* Disable all transmit rings but the first. */
7876         if (!tg3_flag(tp, 5705_PLUS))
7877                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7878         else if (tg3_flag(tp, 5717_PLUS))
7879                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7880         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7881                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7882         else
7883                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7884
7885         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7886              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7887                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7888                               BDINFO_FLAGS_DISABLED);
7889
7890
7891         /* Disable all receive return rings but the first. */
7892         if (tg3_flag(tp, 5717_PLUS))
7893                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7894         else if (!tg3_flag(tp, 5705_PLUS))
7895                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7896         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7897                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7898                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7899         else
7900                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7901
7902         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7903              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7904                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7905                               BDINFO_FLAGS_DISABLED);
7906
7907         /* Disable interrupts */
7908         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7909         tp->napi[0].chk_msi_cnt = 0;
7910         tp->napi[0].last_rx_cons = 0;
7911         tp->napi[0].last_tx_cons = 0;
7912
7913         /* Zero mailbox registers. */
7914         if (tg3_flag(tp, SUPPORT_MSIX)) {
7915                 for (i = 1; i < tp->irq_max; i++) {
7916                         tp->napi[i].tx_prod = 0;
7917                         tp->napi[i].tx_cons = 0;
7918                         if (tg3_flag(tp, ENABLE_TSS))
7919                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7920                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7921                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7922                         tp->napi[0].chk_msi_cnt = 0;
7923                         tp->napi[i].last_rx_cons = 0;
7924                         tp->napi[i].last_tx_cons = 0;
7925                 }
7926                 if (!tg3_flag(tp, ENABLE_TSS))
7927                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7928         } else {
7929                 tp->napi[0].tx_prod = 0;
7930                 tp->napi[0].tx_cons = 0;
7931                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7932                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7933         }
7934
7935         /* Make sure the NIC-based send BD rings are disabled. */
7936         if (!tg3_flag(tp, 5705_PLUS)) {
7937                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7938                 for (i = 0; i < 16; i++)
7939                         tw32_tx_mbox(mbox + i * 8, 0);
7940         }
7941
7942         txrcb = NIC_SRAM_SEND_RCB;
7943         rxrcb = NIC_SRAM_RCV_RET_RCB;
7944
7945         /* Clear status block in ram. */
7946         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7947
7948         /* Set status block DMA address */
7949         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7950              ((u64) tnapi->status_mapping >> 32));
7951         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7952              ((u64) tnapi->status_mapping & 0xffffffff));
7953
7954         if (tnapi->tx_ring) {
7955                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7956                                (TG3_TX_RING_SIZE <<
7957                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7958                                NIC_SRAM_TX_BUFFER_DESC);
7959                 txrcb += TG3_BDINFO_SIZE;
7960         }
7961
7962         if (tnapi->rx_rcb) {
7963                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7964                                (tp->rx_ret_ring_mask + 1) <<
7965                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7966                 rxrcb += TG3_BDINFO_SIZE;
7967         }
7968
7969         stblk = HOSTCC_STATBLCK_RING1;
7970
7971         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7972                 u64 mapping = (u64)tnapi->status_mapping;
7973                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7974                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7975
7976                 /* Clear status block in ram. */
7977                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7978
7979                 if (tnapi->tx_ring) {
7980                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7981                                        (TG3_TX_RING_SIZE <<
7982                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7983                                        NIC_SRAM_TX_BUFFER_DESC);
7984                         txrcb += TG3_BDINFO_SIZE;
7985                 }
7986
7987                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7988                                ((tp->rx_ret_ring_mask + 1) <<
7989                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7990
7991                 stblk += 8;
7992                 rxrcb += TG3_BDINFO_SIZE;
7993         }
7994 }
7995
7996 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7997 {
7998         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7999
8000         if (!tg3_flag(tp, 5750_PLUS) ||
8001             tg3_flag(tp, 5780_CLASS) ||
8002             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8003             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8004                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8005         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8006                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8007                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8008         else
8009                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8010
8011         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8012         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8013
8014         val = min(nic_rep_thresh, host_rep_thresh);
8015         tw32(RCVBDI_STD_THRESH, val);
8016
8017         if (tg3_flag(tp, 57765_PLUS))
8018                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8019
8020         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8021                 return;
8022
8023         if (!tg3_flag(tp, 5705_PLUS))
8024                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8025         else
8026                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8027
8028         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8029
8030         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8031         tw32(RCVBDI_JUMBO_THRESH, val);
8032
8033         if (tg3_flag(tp, 57765_PLUS))
8034                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8035 }
8036
8037 /* tp->lock is held. */
8038 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8039 {
8040         u32 val, rdmac_mode;
8041         int i, err, limit;
8042         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8043
8044         tg3_disable_ints(tp);
8045
8046         tg3_stop_fw(tp);
8047
8048         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8049
8050         if (tg3_flag(tp, INIT_COMPLETE))
8051                 tg3_abort_hw(tp, 1);
8052
8053         /* Enable MAC control of LPI */
8054         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8055                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8056                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8057                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8058
8059                 tw32_f(TG3_CPMU_EEE_CTRL,
8060                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8061
8062                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8063                       TG3_CPMU_EEEMD_LPI_IN_TX |
8064                       TG3_CPMU_EEEMD_LPI_IN_RX |
8065                       TG3_CPMU_EEEMD_EEE_ENABLE;
8066
8067                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8068                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8069
8070                 if (tg3_flag(tp, ENABLE_APE))
8071                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8072
8073                 tw32_f(TG3_CPMU_EEE_MODE, val);
8074
8075                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8076                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8077                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8078
8079                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8080                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8081                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8082         }
8083
8084         if (reset_phy)
8085                 tg3_phy_reset(tp);
8086
8087         err = tg3_chip_reset(tp);
8088         if (err)
8089                 return err;
8090
8091         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8092
8093         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8094                 val = tr32(TG3_CPMU_CTRL);
8095                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8096                 tw32(TG3_CPMU_CTRL, val);
8097
8098                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8099                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8100                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8101                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8102
8103                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8104                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8105                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8106                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8107
8108                 val = tr32(TG3_CPMU_HST_ACC);
8109                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8110                 val |= CPMU_HST_ACC_MACCLK_6_25;
8111                 tw32(TG3_CPMU_HST_ACC, val);
8112         }
8113
8114         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8115                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8116                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8117                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8118                 tw32(PCIE_PWR_MGMT_THRESH, val);
8119
8120                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8121                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8122
8123                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8124
8125                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8126                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8127         }
8128
8129         if (tg3_flag(tp, L1PLLPD_EN)) {
8130                 u32 grc_mode = tr32(GRC_MODE);
8131
8132                 /* Access the lower 1K of PL PCIE block registers. */
8133                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8134                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8135
8136                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8137                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8138                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8139
8140                 tw32(GRC_MODE, grc_mode);
8141         }
8142
8143         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8144                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8145                         u32 grc_mode = tr32(GRC_MODE);
8146
8147                         /* Access the lower 1K of PL PCIE block registers. */
8148                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8149                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8150
8151                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8152                                    TG3_PCIE_PL_LO_PHYCTL5);
8153                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8154                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8155
8156                         tw32(GRC_MODE, grc_mode);
8157                 }
8158
8159                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8160                         u32 grc_mode = tr32(GRC_MODE);
8161
8162                         /* Access the lower 1K of DL PCIE block registers. */
8163                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8164                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8165
8166                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8167                                    TG3_PCIE_DL_LO_FTSMAX);
8168                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8169                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8170                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8171
8172                         tw32(GRC_MODE, grc_mode);
8173                 }
8174
8175                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8176                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8177                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8178                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8179         }
8180
8181         /* This works around an issue with Athlon chipsets on
8182          * B3 tigon3 silicon.  This bit has no effect on any
8183          * other revision.  But do not set this on PCI Express
8184          * chips and don't even touch the clocks if the CPMU is present.
8185          */
8186         if (!tg3_flag(tp, CPMU_PRESENT)) {
8187                 if (!tg3_flag(tp, PCI_EXPRESS))
8188                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8189                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8190         }
8191
8192         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8193             tg3_flag(tp, PCIX_MODE)) {
8194                 val = tr32(TG3PCI_PCISTATE);
8195                 val |= PCISTATE_RETRY_SAME_DMA;
8196                 tw32(TG3PCI_PCISTATE, val);
8197         }
8198
8199         if (tg3_flag(tp, ENABLE_APE)) {
8200                 /* Allow reads and writes to the
8201                  * APE register and memory space.
8202                  */
8203                 val = tr32(TG3PCI_PCISTATE);
8204                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8205                        PCISTATE_ALLOW_APE_SHMEM_WR |
8206                        PCISTATE_ALLOW_APE_PSPACE_WR;
8207                 tw32(TG3PCI_PCISTATE, val);
8208         }
8209
8210         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8211                 /* Enable some hw fixes.  */
8212                 val = tr32(TG3PCI_MSI_DATA);
8213                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8214                 tw32(TG3PCI_MSI_DATA, val);
8215         }
8216
8217         /* Descriptor ring init may make accesses to the
8218          * NIC SRAM area to setup the TX descriptors, so we
8219          * can only do this after the hardware has been
8220          * successfully reset.
8221          */
8222         err = tg3_init_rings(tp);
8223         if (err)
8224                 return err;
8225
8226         if (tg3_flag(tp, 57765_PLUS)) {
8227                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8228                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8229                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8230                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8231                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8232                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8233                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8234                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8235         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8236                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8237                 /* This value is determined during the probe time DMA
8238                  * engine test, tg3_test_dma.
8239                  */
8240                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8241         }
8242
8243         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8244                           GRC_MODE_4X_NIC_SEND_RINGS |
8245                           GRC_MODE_NO_TX_PHDR_CSUM |
8246                           GRC_MODE_NO_RX_PHDR_CSUM);
8247         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8248
8249         /* Pseudo-header checksum is done by hardware logic and not
8250          * the offload processers, so make the chip do the pseudo-
8251          * header checksums on receive.  For transmit it is more
8252          * convenient to do the pseudo-header checksum in software
8253          * as Linux does that on transmit for us in all cases.
8254          */
8255         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8256
8257         tw32(GRC_MODE,
8258              tp->grc_mode |
8259              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8260
8261         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8262         val = tr32(GRC_MISC_CFG);
8263         val &= ~0xff;
8264         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8265         tw32(GRC_MISC_CFG, val);
8266
8267         /* Initialize MBUF/DESC pool. */
8268         if (tg3_flag(tp, 5750_PLUS)) {
8269                 /* Do nothing.  */
8270         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8271                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8272                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8273                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8274                 else
8275                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8276                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8277                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8278         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8279                 int fw_len;
8280
8281                 fw_len = tp->fw_len;
8282                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8283                 tw32(BUFMGR_MB_POOL_ADDR,
8284                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8285                 tw32(BUFMGR_MB_POOL_SIZE,
8286                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8287         }
8288
8289         if (tp->dev->mtu <= ETH_DATA_LEN) {
8290                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8291                      tp->bufmgr_config.mbuf_read_dma_low_water);
8292                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8293                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8294                 tw32(BUFMGR_MB_HIGH_WATER,
8295                      tp->bufmgr_config.mbuf_high_water);
8296         } else {
8297                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8298                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8299                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8300                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8301                 tw32(BUFMGR_MB_HIGH_WATER,
8302                      tp->bufmgr_config.mbuf_high_water_jumbo);
8303         }
8304         tw32(BUFMGR_DMA_LOW_WATER,
8305              tp->bufmgr_config.dma_low_water);
8306         tw32(BUFMGR_DMA_HIGH_WATER,
8307              tp->bufmgr_config.dma_high_water);
8308
8309         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8310         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8311                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8312         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8313             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8314             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8315                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8316         tw32(BUFMGR_MODE, val);
8317         for (i = 0; i < 2000; i++) {
8318                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8319                         break;
8320                 udelay(10);
8321         }
8322         if (i >= 2000) {
8323                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8324                 return -ENODEV;
8325         }
8326
8327         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8328                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8329
8330         tg3_setup_rxbd_thresholds(tp);
8331
8332         /* Initialize TG3_BDINFO's at:
8333          *  RCVDBDI_STD_BD:     standard eth size rx ring
8334          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8335          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8336          *
8337          * like so:
8338          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8339          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8340          *                              ring attribute flags
8341          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8342          *
8343          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8344          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8345          *
8346          * The size of each ring is fixed in the firmware, but the location is
8347          * configurable.
8348          */
8349         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8350              ((u64) tpr->rx_std_mapping >> 32));
8351         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8352              ((u64) tpr->rx_std_mapping & 0xffffffff));
8353         if (!tg3_flag(tp, 5717_PLUS))
8354                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8355                      NIC_SRAM_RX_BUFFER_DESC);
8356
8357         /* Disable the mini ring */
8358         if (!tg3_flag(tp, 5705_PLUS))
8359                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8360                      BDINFO_FLAGS_DISABLED);
8361
8362         /* Program the jumbo buffer descriptor ring control
8363          * blocks on those devices that have them.
8364          */
8365         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8366             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8367
8368                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8369                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8370                              ((u64) tpr->rx_jmb_mapping >> 32));
8371                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8372                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8373                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8374                               BDINFO_FLAGS_MAXLEN_SHIFT;
8375                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8376                              val | BDINFO_FLAGS_USE_EXT_RECV);
8377                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8378                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8379                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8380                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8381                 } else {
8382                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8383                              BDINFO_FLAGS_DISABLED);
8384                 }
8385
8386                 if (tg3_flag(tp, 57765_PLUS)) {
8387                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8388                                 val = TG3_RX_STD_MAX_SIZE_5700;
8389                         else
8390                                 val = TG3_RX_STD_MAX_SIZE_5717;
8391                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8392                         val |= (TG3_RX_STD_DMA_SZ << 2);
8393                 } else
8394                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8395         } else
8396                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8397
8398         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8399
8400         tpr->rx_std_prod_idx = tp->rx_pending;
8401         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8402
8403         tpr->rx_jmb_prod_idx =
8404                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8405         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8406
8407         tg3_rings_reset(tp);
8408
8409         /* Initialize MAC address and backoff seed. */
8410         __tg3_set_mac_addr(tp, 0);
8411
8412         /* MTU + ethernet header + FCS + optional VLAN tag */
8413         tw32(MAC_RX_MTU_SIZE,
8414              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8415
8416         /* The slot time is changed by tg3_setup_phy if we
8417          * run at gigabit with half duplex.
8418          */
8419         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8420               (6 << TX_LENGTHS_IPG_SHIFT) |
8421               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8422
8423         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8424                 val |= tr32(MAC_TX_LENGTHS) &
8425                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8426                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8427
8428         tw32(MAC_TX_LENGTHS, val);
8429
8430         /* Receive rules. */
8431         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8432         tw32(RCVLPC_CONFIG, 0x0181);
8433
8434         /* Calculate RDMAC_MODE setting early, we need it to determine
8435          * the RCVLPC_STATE_ENABLE mask.
8436          */
8437         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8438                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8439                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8440                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8441                       RDMAC_MODE_LNGREAD_ENAB);
8442
8443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8444                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8445
8446         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8447             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8448             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8449                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8450                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8451                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8452
8453         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8454             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8455                 if (tg3_flag(tp, TSO_CAPABLE) &&
8456                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8457                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8458                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8459                            !tg3_flag(tp, IS_5788)) {
8460                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8461                 }
8462         }
8463
8464         if (tg3_flag(tp, PCI_EXPRESS))
8465                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8466
8467         if (tg3_flag(tp, HW_TSO_1) ||
8468             tg3_flag(tp, HW_TSO_2) ||
8469             tg3_flag(tp, HW_TSO_3))
8470                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8471
8472         if (tg3_flag(tp, 57765_PLUS) ||
8473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8474             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8475                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8476
8477         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8478                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8479
8480         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8481             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8482             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8483             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8484             tg3_flag(tp, 57765_PLUS)) {
8485                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8486                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8487                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8488                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8489                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8490                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8491                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8492                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8493                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8494                 }
8495                 tw32(TG3_RDMA_RSRVCTRL_REG,
8496                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8497         }
8498
8499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8500             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8501                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8502                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8503                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8504                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8505         }
8506
8507         /* Receive/send statistics. */
8508         if (tg3_flag(tp, 5750_PLUS)) {
8509                 val = tr32(RCVLPC_STATS_ENABLE);
8510                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8511                 tw32(RCVLPC_STATS_ENABLE, val);
8512         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8513                    tg3_flag(tp, TSO_CAPABLE)) {
8514                 val = tr32(RCVLPC_STATS_ENABLE);
8515                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8516                 tw32(RCVLPC_STATS_ENABLE, val);
8517         } else {
8518                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8519         }
8520         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8521         tw32(SNDDATAI_STATSENAB, 0xffffff);
8522         tw32(SNDDATAI_STATSCTRL,
8523              (SNDDATAI_SCTRL_ENABLE |
8524               SNDDATAI_SCTRL_FASTUPD));
8525
8526         /* Setup host coalescing engine. */
8527         tw32(HOSTCC_MODE, 0);
8528         for (i = 0; i < 2000; i++) {
8529                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8530                         break;
8531                 udelay(10);
8532         }
8533
8534         __tg3_set_coalesce(tp, &tp->coal);
8535
8536         if (!tg3_flag(tp, 5705_PLUS)) {
8537                 /* Status/statistics block address.  See tg3_timer,
8538                  * the tg3_periodic_fetch_stats call there, and
8539                  * tg3_get_stats to see how this works for 5705/5750 chips.
8540                  */
8541                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8542                      ((u64) tp->stats_mapping >> 32));
8543                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8544                      ((u64) tp->stats_mapping & 0xffffffff));
8545                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8546
8547                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8548
8549                 /* Clear statistics and status block memory areas */
8550                 for (i = NIC_SRAM_STATS_BLK;
8551                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8552                      i += sizeof(u32)) {
8553                         tg3_write_mem(tp, i, 0);
8554                         udelay(40);
8555                 }
8556         }
8557
8558         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8559
8560         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8561         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8562         if (!tg3_flag(tp, 5705_PLUS))
8563                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8564
8565         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8566                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8567                 /* reset to prevent losing 1st rx packet intermittently */
8568                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8569                 udelay(10);
8570         }
8571
8572         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8573                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8574                         MAC_MODE_FHDE_ENABLE;
8575         if (tg3_flag(tp, ENABLE_APE))
8576                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8577         if (!tg3_flag(tp, 5705_PLUS) &&
8578             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8579             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8580                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8581         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8582         udelay(40);
8583
8584         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8585          * If TG3_FLAG_IS_NIC is zero, we should read the
8586          * register to preserve the GPIO settings for LOMs. The GPIOs,
8587          * whether used as inputs or outputs, are set by boot code after
8588          * reset.
8589          */
8590         if (!tg3_flag(tp, IS_NIC)) {
8591                 u32 gpio_mask;
8592
8593                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8594                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8595                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8596
8597                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8598                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8599                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8600
8601                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8602                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8603
8604                 tp->grc_local_ctrl &= ~gpio_mask;
8605                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8606
8607                 /* GPIO1 must be driven high for eeprom write protect */
8608                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8609                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8610                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8611         }
8612         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8613         udelay(100);
8614
8615         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8616                 val = tr32(MSGINT_MODE);
8617                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8618                 tw32(MSGINT_MODE, val);
8619         }
8620
8621         if (!tg3_flag(tp, 5705_PLUS)) {
8622                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8623                 udelay(40);
8624         }
8625
8626         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8627                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8628                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8629                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8630                WDMAC_MODE_LNGREAD_ENAB);
8631
8632         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8633             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8634                 if (tg3_flag(tp, TSO_CAPABLE) &&
8635                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8636                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8637                         /* nothing */
8638                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8639                            !tg3_flag(tp, IS_5788)) {
8640                         val |= WDMAC_MODE_RX_ACCEL;
8641                 }
8642         }
8643
8644         /* Enable host coalescing bug fix */
8645         if (tg3_flag(tp, 5755_PLUS))
8646                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8647
8648         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8649                 val |= WDMAC_MODE_BURST_ALL_DATA;
8650
8651         tw32_f(WDMAC_MODE, val);
8652         udelay(40);
8653
8654         if (tg3_flag(tp, PCIX_MODE)) {
8655                 u16 pcix_cmd;
8656
8657                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8658                                      &pcix_cmd);
8659                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8660                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8661                         pcix_cmd |= PCI_X_CMD_READ_2K;
8662                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8663                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8664                         pcix_cmd |= PCI_X_CMD_READ_2K;
8665                 }
8666                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8667                                       pcix_cmd);
8668         }
8669
8670         tw32_f(RDMAC_MODE, rdmac_mode);
8671         udelay(40);
8672
8673         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8674         if (!tg3_flag(tp, 5705_PLUS))
8675                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8676
8677         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8678                 tw32(SNDDATAC_MODE,
8679                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8680         else
8681                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8682
8683         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8684         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8685         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8686         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8687                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8688         tw32(RCVDBDI_MODE, val);
8689         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8690         if (tg3_flag(tp, HW_TSO_1) ||
8691             tg3_flag(tp, HW_TSO_2) ||
8692             tg3_flag(tp, HW_TSO_3))
8693                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8694         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8695         if (tg3_flag(tp, ENABLE_TSS))
8696                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8697         tw32(SNDBDI_MODE, val);
8698         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8699
8700         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8701                 err = tg3_load_5701_a0_firmware_fix(tp);
8702                 if (err)
8703                         return err;
8704         }
8705
8706         if (tg3_flag(tp, TSO_CAPABLE)) {
8707                 err = tg3_load_tso_firmware(tp);
8708                 if (err)
8709                         return err;
8710         }
8711
8712         tp->tx_mode = TX_MODE_ENABLE;
8713
8714         if (tg3_flag(tp, 5755_PLUS) ||
8715             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8716                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8717
8718         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8719                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8720                 tp->tx_mode &= ~val;
8721                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8722         }
8723
8724         tw32_f(MAC_TX_MODE, tp->tx_mode);
8725         udelay(100);
8726
8727         if (tg3_flag(tp, ENABLE_RSS)) {
8728                 int i = 0;
8729                 u32 reg = MAC_RSS_INDIR_TBL_0;
8730
8731                 if (tp->irq_cnt == 2) {
8732                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8733                                 tw32(reg, 0x0);
8734                                 reg += 4;
8735                         }
8736                 } else {
8737                         u32 val;
8738
8739                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8740                                 val = i % (tp->irq_cnt - 1);
8741                                 i++;
8742                                 for (; i % 8; i++) {
8743                                         val <<= 4;
8744                                         val |= (i % (tp->irq_cnt - 1));
8745                                 }
8746                                 tw32(reg, val);
8747                                 reg += 4;
8748                         }
8749                 }
8750
8751                 /* Setup the "secret" hash key. */
8752                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8753                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8754                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8755                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8756                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8757                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8758                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8759                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8760                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8761                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8762         }
8763
8764         tp->rx_mode = RX_MODE_ENABLE;
8765         if (tg3_flag(tp, 5755_PLUS))
8766                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8767
8768         if (tg3_flag(tp, ENABLE_RSS))
8769                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8770                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8771                                RX_MODE_RSS_IPV6_HASH_EN |
8772                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8773                                RX_MODE_RSS_IPV4_HASH_EN |
8774                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8775
8776         tw32_f(MAC_RX_MODE, tp->rx_mode);
8777         udelay(10);
8778
8779         tw32(MAC_LED_CTRL, tp->led_ctrl);
8780
8781         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8782         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8783                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8784                 udelay(10);
8785         }
8786         tw32_f(MAC_RX_MODE, tp->rx_mode);
8787         udelay(10);
8788
8789         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8790                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8791                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8792                         /* Set drive transmission level to 1.2V  */
8793                         /* only if the signal pre-emphasis bit is not set  */
8794                         val = tr32(MAC_SERDES_CFG);
8795                         val &= 0xfffff000;
8796                         val |= 0x880;
8797                         tw32(MAC_SERDES_CFG, val);
8798                 }
8799                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8800                         tw32(MAC_SERDES_CFG, 0x616000);
8801         }
8802
8803         /* Prevent chip from dropping frames when flow control
8804          * is enabled.
8805          */
8806         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8807                 val = 1;
8808         else
8809                 val = 2;
8810         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8811
8812         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8813             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8814                 /* Use hardware link auto-negotiation */
8815                 tg3_flag_set(tp, HW_AUTONEG);
8816         }
8817
8818         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8819             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8820                 u32 tmp;
8821
8822                 tmp = tr32(SERDES_RX_CTRL);
8823                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8824                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8825                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8826                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8827         }
8828
8829         if (!tg3_flag(tp, USE_PHYLIB)) {
8830                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8831                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8832                         tp->link_config.speed = tp->link_config.orig_speed;
8833                         tp->link_config.duplex = tp->link_config.orig_duplex;
8834                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8835                 }
8836
8837                 err = tg3_setup_phy(tp, 0);
8838                 if (err)
8839                         return err;
8840
8841                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8842                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8843                         u32 tmp;
8844
8845                         /* Clear CRC stats. */
8846                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8847                                 tg3_writephy(tp, MII_TG3_TEST1,
8848                                              tmp | MII_TG3_TEST1_CRC_EN);
8849                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8850                         }
8851                 }
8852         }
8853
8854         __tg3_set_rx_mode(tp->dev);
8855
8856         /* Initialize receive rules. */
8857         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8858         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8859         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8860         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8861
8862         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8863                 limit = 8;
8864         else
8865                 limit = 16;
8866         if (tg3_flag(tp, ENABLE_ASF))
8867                 limit -= 4;
8868         switch (limit) {
8869         case 16:
8870                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8871         case 15:
8872                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8873         case 14:
8874                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8875         case 13:
8876                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8877         case 12:
8878                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8879         case 11:
8880                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8881         case 10:
8882                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8883         case 9:
8884                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8885         case 8:
8886                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8887         case 7:
8888                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8889         case 6:
8890                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8891         case 5:
8892                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8893         case 4:
8894                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8895         case 3:
8896                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8897         case 2:
8898         case 1:
8899
8900         default:
8901                 break;
8902         }
8903
8904         if (tg3_flag(tp, ENABLE_APE))
8905                 /* Write our heartbeat update interval to APE. */
8906                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8907                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8908
8909         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8910
8911         return 0;
8912 }
8913
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns the result of tg3_reset_hw() (0 on success).
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	/* Reset the indirect memory window base before reprogramming. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
8925
/* Read the 32-bit statistics register REG and add it into the 64-bit
 * accumulator PSTAT (a low/high u32 pair), propagating a carry into
 * ->high when ->low wraps.  REG is evaluated exactly once; the macro is
 * wrapped in do { } while (0) so it behaves as a single statement.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
8932
/* tg3_periodic_fetch_stats - fold the MAC hardware counters into SW stats.
 *
 * Called once per second from tg3_timer() (under tp->lock) to accumulate
 * the chip's 32-bit TX/RX MAC statistics registers into the 64-bit
 * counters in tp->hw_stats via TG3_STAT_ADD32().  Does nothing while the
 * link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On 5717 / 5719-A0 / 5720-A0 the RCVLPC discard counter
		 * is not read; instead count mbuf low-watermark
		 * flow-attention events.  The attention bit is written
		 * back (presumably write-one-to-clear — per chip docs)
		 * and accumulated with manual carry propagation, mirroring
		 * what TG3_STAT_ADD32 does.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
8987
8988 static void tg3_chk_missed_msi(struct tg3 *tp)
8989 {
8990         u32 i;
8991
8992         for (i = 0; i < tp->irq_cnt; i++) {
8993                 struct tg3_napi *tnapi = &tp->napi[i];
8994
8995                 if (tg3_has_work(tnapi)) {
8996                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8997                             tnapi->last_tx_cons == tnapi->tx_cons) {
8998                                 if (tnapi->chk_msi_cnt < 1) {
8999                                         tnapi->chk_msi_cnt++;
9000                                         return;
9001                                 }
9002                                 tw32_mailbox(tnapi->int_mbox,
9003                                              tnapi->last_tag << 24);
9004                         }
9005                 }
9006                 tnapi->chk_msi_cnt = 0;
9007                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9008                 tnapi->last_tx_cons = tnapi->tx_cons;
9009         }
9010 }
9011
/* tg3_timer - the driver's periodic housekeeping timer.
 *
 * Runs every tp->timer_offset jiffies under tp->lock and handles:
 * missed-MSI detection on 5717/57765, the race-prone non-tagged IRQ
 * status protocol, once-per-second link/statistics maintenance, and the
 * ASF firmware heartbeat.  Always rearms itself, except when a dead
 * write-DMA engine forces a chip reset via the reset work queue.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* IRQs are being synchronized: skip all work but keep ticking. */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated but no IRQ seen: reassert
			 * the interrupt line.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Force a coalescing update now. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* Write-DMA enable bit unexpectedly clear: the engine is
		 * dead, so schedule a full chip reset and bail out.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tg3_flag_set(tp, RESTART_TIMER);
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll MAC_STATUS for link/PHY events instead of
			 * relying on link-change interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but state changed, or link was down
			 * and the SERDES now sees sync/signal: renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode to reset
					 * the link state machine.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
9139
9140 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9141 {
9142         irq_handler_t fn;
9143         unsigned long flags;
9144         char *name;
9145         struct tg3_napi *tnapi = &tp->napi[irq_num];
9146
9147         if (tp->irq_cnt == 1)
9148                 name = tp->dev->name;
9149         else {
9150                 name = &tnapi->irq_lbl[0];
9151                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9152                 name[IFNAMSIZ-1] = 0;
9153         }
9154
9155         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9156                 fn = tg3_msi;
9157                 if (tg3_flag(tp, 1SHOT_MSI))
9158                         fn = tg3_msi_1shot;
9159                 flags = 0;
9160         } else {
9161                 fn = tg3_interrupt;
9162                 if (tg3_flag(tp, TAGGED_STATUS))
9163                         fn = tg3_interrupt_tagged;
9164                 flags = IRQF_SHARED;
9165         }
9166
9167         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9168 }
9169
/* tg3_test_interrupt - verify that the chip can actually deliver an IRQ.
 *
 * Temporarily swaps in a minimal test ISR, triggers a coalescing "now"
 * event, and polls for up to ~50ms for evidence of delivery (a non-zero
 * interrupt mailbox or the masked-PCI-interrupt bit).  The normal
 * handler is restored before returning.
 *
 * Returns 0 if an interrupt was seen, -EIO if not, -ENODEV when the
 * device is down, or the error from a failed request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Drop the production handler so the test ISR can take vector 0. */
	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		/* NOTE(review): the original handler freed above is not
		 * re-installed on this path, leaving the device without
		 * an IRQ handler — verify callers tolerate this.
		 */
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick host coalescing so the chip raises an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either signature proves the interrupt arrived. */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* On 57765+ re-ack with the last tag if the status block
		 * advanced, to coax another interrupt.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Remove the test ISR and restore the production handler. */
	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
9243
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * On an -EIO from tg3_test_interrupt() (no interrupt observed), MSI is
 * abandoned: the vector is freed, MSI disabled, the legacy pin IRQ
 * re-requested, and the chip halted and re-initialized because the MSI
 * cycle may have ended with a Master Abort.  Any other error is
 * returned unchanged.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved command word before acting on the result. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	/* Fall back to the legacy interrupt pin. */
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
9304
/* Load the chip's runtime firmware blob named in tp->fw_needed via the
 * kernel firmware loader.
 *
 * Returns 0 on success with tp->fw and tp->fw_len populated (and
 * tp->fw_needed cleared so we do not reload it), -ENOENT if the blob
 * could not be fetched, or -EINVAL if its embedded length field is
 * inconsistent with the blob's actual size.
 */
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		/* 12 = the three header words before the code image. */
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
9335
/* Try to put the device into MSI-X mode with one vector per online CPU
 * plus one extra vector for link/misc interrupts.
 *
 * Returns false (caller then falls back to plain MSI) when only one CPU
 * is online, when the PCI core cannot grant any vectors, or when the rx
 * queue count cannot be set.  On success tp->irq_cnt and each
 * tp->napi[i].irq_vec are filled in, and the RSS (and, on 5719/5720,
 * TSS) flags are set when more than one vector was granted.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Positive rc means fewer vectors are available than we
		 * asked for; retry with exactly that many.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	/* rx queues = granted vectors minus the link/misc vector. */
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		/* 5719/5720 additionally support multiple tx queues. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
9390
/* Select the interrupt mode: MSI-X if supported and successfully
 * enabled, otherwise MSI, otherwise legacy INTx.  MSI modes require
 * tagged status blocks; if that is missing we warn and force INTx.
 * Always leaves tp->irq_cnt and tp->napi[0].irq_vec valid for the
 * chosen mode.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		/* Enable multi-vector mode only when MSI-X actually
		 * granted more than one vector.
		 */
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* INTx or single MSI: one vector, one rx/tx queue each. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
9422
9423 static void tg3_ints_fini(struct tg3 *tp)
9424 {
9425         if (tg3_flag(tp, USING_MSIX))
9426                 pci_disable_msix(tp->pdev);
9427         else if (tg3_flag(tp, USING_MSI))
9428                 pci_disable_msi(tp->pdev);
9429         tg3_flag_clear(tp, USING_MSI);
9430         tg3_flag_clear(tp, USING_MSIX);
9431         tg3_flag_clear(tp, ENABLE_RSS);
9432         tg3_flag_clear(tp, ENABLE_TSS);
9433 }
9434
9435 static int tg3_open(struct net_device *dev)
9436 {
9437         struct tg3 *tp = netdev_priv(dev);
9438         int i, err;
9439
9440         if (tp->fw_needed) {
9441                 err = tg3_request_firmware(tp);
9442                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9443                         if (err)
9444                                 return err;
9445                 } else if (err) {
9446                         netdev_warn(tp->dev, "TSO capability disabled\n");
9447                         tg3_flag_clear(tp, TSO_CAPABLE);
9448                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9449                         netdev_notice(tp->dev, "TSO capability restored\n");
9450                         tg3_flag_set(tp, TSO_CAPABLE);
9451                 }
9452         }
9453
9454         netif_carrier_off(tp->dev);
9455
9456         err = tg3_power_up(tp);
9457         if (err)
9458                 return err;
9459
9460         tg3_full_lock(tp, 0);
9461
9462         tg3_disable_ints(tp);
9463         tg3_flag_clear(tp, INIT_COMPLETE);
9464
9465         tg3_full_unlock(tp);
9466
9467         /*
9468          * Setup interrupts first so we know how
9469          * many NAPI resources to allocate
9470          */
9471         tg3_ints_init(tp);
9472
9473         /* The placement of this call is tied
9474          * to the setup and use of Host TX descriptors.
9475          */
9476         err = tg3_alloc_consistent(tp);
9477         if (err)
9478                 goto err_out1;
9479
9480         tg3_napi_init(tp);
9481
9482         tg3_napi_enable(tp);
9483
9484         for (i = 0; i < tp->irq_cnt; i++) {
9485                 struct tg3_napi *tnapi = &tp->napi[i];
9486                 err = tg3_request_irq(tp, i);
9487                 if (err) {
9488                         for (i--; i >= 0; i--)
9489                                 free_irq(tnapi->irq_vec, tnapi);
9490                         break;
9491                 }
9492         }
9493
9494         if (err)
9495                 goto err_out2;
9496
9497         tg3_full_lock(tp, 0);
9498
9499         err = tg3_init_hw(tp, 1);
9500         if (err) {
9501                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9502                 tg3_free_rings(tp);
9503         } else {
9504                 if (tg3_flag(tp, TAGGED_STATUS) &&
9505                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9506                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9507                         tp->timer_offset = HZ;
9508                 else
9509                         tp->timer_offset = HZ / 10;
9510
9511                 BUG_ON(tp->timer_offset > HZ);
9512                 tp->timer_counter = tp->timer_multiplier =
9513                         (HZ / tp->timer_offset);
9514                 tp->asf_counter = tp->asf_multiplier =
9515                         ((HZ / tp->timer_offset) * 2);
9516
9517                 init_timer(&tp->timer);
9518                 tp->timer.expires = jiffies + tp->timer_offset;
9519                 tp->timer.data = (unsigned long) tp;
9520                 tp->timer.function = tg3_timer;
9521         }
9522
9523         tg3_full_unlock(tp);
9524
9525         if (err)
9526                 goto err_out3;
9527
9528         if (tg3_flag(tp, USING_MSI)) {
9529                 err = tg3_test_msi(tp);
9530
9531                 if (err) {
9532                         tg3_full_lock(tp, 0);
9533                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9534                         tg3_free_rings(tp);
9535                         tg3_full_unlock(tp);
9536
9537                         goto err_out2;
9538                 }
9539
9540                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9541                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9542
9543                         tw32(PCIE_TRANSACTION_CFG,
9544                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9545                 }
9546         }
9547
9548         tg3_phy_start(tp);
9549
9550         tg3_full_lock(tp, 0);
9551
9552         add_timer(&tp->timer);
9553         tg3_flag_set(tp, INIT_COMPLETE);
9554         tg3_enable_ints(tp);
9555
9556         tg3_full_unlock(tp);
9557
9558         netif_tx_start_all_queues(dev);
9559
9560         /*
9561          * Reset loopback feature if it was turned on while the device was down
9562          * make sure that it's installed properly now.
9563          */
9564         if (dev->features & NETIF_F_LOOPBACK)
9565                 tg3_set_loopback(dev, dev->features);
9566
9567         return 0;
9568
9569 err_out3:
9570         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9571                 struct tg3_napi *tnapi = &tp->napi[i];
9572                 free_irq(tnapi->irq_vec, tnapi);
9573         }
9574
9575 err_out2:
9576         tg3_napi_disable(tp);
9577         tg3_napi_fini(tp);
9578         tg3_free_consistent(tp);
9579
9580 err_out1:
9581         tg3_ints_fini(tp);
9582         tg3_frob_aux_power(tp, false);
9583         pci_set_power_state(tp->pdev, PCI_D3hot);
9584         return err;
9585 }
9586
9587 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9588                                                  struct rtnl_link_stats64 *);
9589 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9590
/* Bring the interface down: stop NAPI, tx queues and the periodic
 * timer, reset the chip, release IRQs and DMA memory, snapshot the
 * hardware statistics into the *_prev copies so counters survive the
 * down period, and power the chip off.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	cancel_work_sync(&tp->reset_task);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Fold the final hardware counters into the saved totals before
	 * the stats block is freed by tg3_free_consistent() below.
	 */
	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
9637
/* Combine the two 32-bit halves of a hardware statistics counter into
 * a single 64-bit value.
 */
static inline u64 get_stat64(tg3_stat64_t *val)
{
	u64 hi = val->high;
	u64 lo = val->low;

	return (hi << 32) | lo;
}
9642
/* Return the cumulative RX CRC error count.  On 5700/5701 with a
 * copper PHY the count is taken from the PHY's own counter (read under
 * tp->lock with the CRC counter enabled first) and accumulated into
 * tp->phy_crc_errors; every other configuration uses the MAC's
 * rx_fcs_errors hardware statistic.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			/* PHY read failed; count nothing this round. */
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
9668
9669 #define ESTAT_ADD(member) \
9670         estats->member =        old_estats->member + \
9671                                 get_stat64(&hw_stats->member)
9672
/* Refresh tp->estats by adding the live hardware counters to the
 * totals saved at the last tg3_close() (tp->estats_prev) via the
 * ESTAT_ADD() macro.  Returns the saved totals untouched when the
 * hardware stats block is not mapped (device down).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive-side MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side MAC counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive list placement state machine counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send data initiator counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}
9762
/* ndo_get_stats64 implementation: build the rtnl link statistics from
 * the live hardware counters plus the totals saved at the last close
 * (tp->net_stats_prev).  Returns the saved totals unchanged when the
 * hardware stats block is not mapped (device down).
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-counted drops (not a hardware statistic). */
	stats->rx_dropped = tp->rx_dropped;

	return stats;
}
9824
/* Bit-at-a-time CRC-32 (reflected form, polynomial 0xedb88320) over
 * buf[0..len), seeded with all ones and finalized by complementing.
 * Used below to derive the multicast hash-filter bit for an address.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int byte, bit;

	for (byte = 0; byte < len; byte++) {
		crc ^= buf[byte];

		for (bit = 0; bit < 8; bit++) {
			u32 lsb = crc & 0x01;

			crc >>= 1;
			if (lsb)
				crc ^= 0xedb88320;
		}
	}

	return ~crc;
}
9848
9849 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9850 {
9851         /* accept or reject all multicast frames */
9852         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9853         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9854         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9855         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9856 }
9857
/* Program the RX filtering hardware to match dev->flags and the
 * multicast list: promiscuous mode, accept/reject-all multicast, or a
 * CRC-based multicast hash filter.  Caller must hold the full lock
 * (see tg3_set_rx_mode below).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			/* Hash = low 7 bits of the inverted CRC; bits
			 * 5-6 pick the register, bits 0-4 the bit in it.
			 */
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register when something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
9911
/* ndo_set_rx_mode hook: apply the current RX filtering configuration
 * under the driver's full lock.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
9923
/* ethtool get_regs_len: size of the register dump buffer. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
9928
/* ethtool get_regs: dump the legacy register block into _p.  The
 * buffer is zeroed first, and nothing is read while the chip is in a
 * low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
9947
/* ethtool get_eeprom_len: size of the NVRAM as probed at init time. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
9954
/* ethtool get_eeprom: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into data.  NVRAM is read in 4-byte big-endian words,
 * so unaligned head and tail bytes are handled separately around a
 * word-aligned middle loop.  eeprom->len is updated to reflect how
 * many bytes were actually copied, even on a partial failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes copied so far, then fail. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
10017
10018 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10019
/* ethtool set_eeprom: write eeprom->len bytes of data to NVRAM at
 * eeprom->offset.  NVRAM writes are word based, so when the request is
 * unaligned at either end the neighboring words are read first and a
 * word-aligned bounce buffer is assembled, preserving the bytes
 * outside the requested range (read-modify-write).
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Assemble head word + caller data + tail word in a
		 * temporary buffer so the write is fully aligned.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
10078
/* ethtool get_settings: report link capabilities, advertisement and
 * current speed/duplex.  Delegates entirely to phylib when the PHY is
 * managed there; otherwise the answer is built from the driver's own
 * link_config and phy_flags.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Serdes parts are fibre-only; copper parts also do 10/100. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	/* Speed/duplex are only meaningful while the link is managed. */
	if (netif_running(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
10136
/* ethtool set_settings: validate and apply a speed/duplex/autoneg
 * request.  Delegates to phylib when the PHY is managed there.  For
 * autoneg the advertised modes are clamped against what the hardware
 * supports; for forced mode only speed/duplex combinations valid for
 * the media type (serdes vs copper) are accepted.  Returns -EINVAL on
 * an invalid combination, 0 otherwise.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* Reject any advertised mode the hardware can't do. */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		/* Forced mode: serdes is 1000/full only; copper forced
		 * mode is limited to 10 or 100.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested configuration across resets. */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
10227
10228 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10229 {
10230         struct tg3 *tp = netdev_priv(dev);
10231
10232         strcpy(info->driver, DRV_MODULE_NAME);
10233         strcpy(info->version, DRV_MODULE_VERSION);
10234         strcpy(info->fw_version, tp->fw_ver);
10235         strcpy(info->bus_info, pci_name(tp->pdev));
10236 }
10237
10238 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10239 {
10240         struct tg3 *tp = netdev_priv(dev);
10241
10242         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10243                 wol->supported = WAKE_MAGIC;
10244         else
10245                 wol->supported = 0;
10246         wol->wolopts = 0;
10247         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10248                 wol->wolopts = WAKE_MAGIC;
10249         memset(&wol->sopass, 0, sizeof(wol->sopass));
10250 }
10251
10252 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10253 {
10254         struct tg3 *tp = netdev_priv(dev);
10255         struct device *dp = &tp->pdev->dev;
10256
10257         if (wol->wolopts & ~WAKE_MAGIC)
10258                 return -EINVAL;
10259         if ((wol->wolopts & WAKE_MAGIC) &&
10260             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10261                 return -EINVAL;
10262
10263         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10264
10265         spin_lock_bh(&tp->lock);
10266         if (device_may_wakeup(dp))
10267                 tg3_flag_set(tp, WOL_ENABLE);
10268         else
10269                 tg3_flag_clear(tp, WOL_ENABLE);
10270         spin_unlock_bh(&tp->lock);
10271
10272         return 0;
10273 }
10274
10275 static u32 tg3_get_msglevel(struct net_device *dev)
10276 {
10277         struct tg3 *tp = netdev_priv(dev);
10278         return tp->msg_enable;
10279 }
10280
10281 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10282 {
10283         struct tg3 *tp = netdev_priv(dev);
10284         tp->msg_enable = value;
10285 }
10286
/* ethtool ->nway_reset handler: restart link autonegotiation.
 * Returns -EAGAIN if the interface is down (or phylib not attached),
 * -EINVAL for SERDES PHYs or when autoneg is not active.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib owns the PHY: let it restart autoneg. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice and only the second
		 * (checked) read is used — presumably to discard a stale
		 * value; confirm whether the first read is still needed.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart autoneg only when it is enabled or the
			 * PHY is in parallel-detect mode.
			 */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
10320
10321 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10322 {
10323         struct tg3 *tp = netdev_priv(dev);
10324
10325         ering->rx_max_pending = tp->rx_std_ring_mask;
10326         ering->rx_mini_max_pending = 0;
10327         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10328                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10329         else
10330                 ering->rx_jumbo_max_pending = 0;
10331
10332         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10333
10334         ering->rx_pending = tp->rx_pending;
10335         ering->rx_mini_pending = 0;
10336         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10337                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10338         else
10339                 ering->rx_jumbo_pending = 0;
10340
10341         ering->tx_pending = tp->napi[0].tx_pending;
10342 }
10343
/* ethtool ->set_ringparam handler: validate and apply new RX/TX ring
 * sizes.  If the NIC is running it is quiesced, halted, reconfigured,
 * and restarted with the new sizes.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* Reject out-of-range ring sizes.  The TX ring must be able to
	 * hold a maximally fragmented skb (and three of them on chips
	 * needing the TSO bug workaround).
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		/* Quiesce the device before touching ring sizes. */
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* All TX queues use the same ring size. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Apply by halting and re-initializing the chip. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
10389
10390 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10391 {
10392         struct tg3 *tp = netdev_priv(dev);
10393
10394         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10395
10396         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10397                 epause->rx_pause = 1;
10398         else
10399                 epause->rx_pause = 0;
10400
10401         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10402                 epause->tx_pause = 1;
10403         else
10404                 epause->tx_pause = 0;
10405 }
10406
/* ethtool ->set_pauseparam handler: configure RX/TX flow control and
 * whether pause settings are autonegotiated.  Delegates to phylib when
 * it manages the PHY; otherwise applies the settings directly and
 * restarts the hardware if the NIC is up.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric pause requires the PHY to support it. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate the rx/tx pause request into MII
		 * Pause/Asym_Pause advertisement bits.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			/* Only touch the PHY when the pause bits change. */
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: just record the desired
			 * advertisement for when it attaches.
			 */
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			/* Apply by halting and re-initializing the chip. */
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
10508
10509 static int tg3_get_sset_count(struct net_device *dev, int sset)
10510 {
10511         switch (sset) {
10512         case ETH_SS_TEST:
10513                 return TG3_NUM_TEST;
10514         case ETH_SS_STATS:
10515                 return TG3_NUM_STATS;
10516         default:
10517                 return -EOPNOTSUPP;
10518         }
10519 }
10520
10521 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10522 {
10523         switch (stringset) {
10524         case ETH_SS_STATS:
10525                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10526                 break;
10527         case ETH_SS_TEST:
10528                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10529                 break;
10530         default:
10531                 WARN_ON(1);     /* we need a WARN() */
10532                 break;
10533         }
10534 }
10535
/* ethtool ->set_phys_id handler: blink the port LEDs so an operator
 * can physically identify the NIC.  Returning 1 from ETHTOOL_ID_ACTIVE
 * asks the ethtool core for a 1 Hz on/off cycle.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override the MAC and force every speed/traffic LED on. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Override the MAC with all LEDs off. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the normal hardware-driven LED behavior. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
10570
10571 static void tg3_get_ethtool_stats(struct net_device *dev,
10572                                    struct ethtool_stats *estats, u64 *tmp_stats)
10573 {
10574         struct tg3 *tp = netdev_priv(dev);
10575         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10576 }
10577
/* Read the chip's VPD (Vital Product Data) block into a freshly
 * allocated buffer.  The data is located either via the NVRAM
 * directory (extended-VPD entry) or at the default VPD offset, with a
 * PCI-config-space VPD read as the non-EEPROM fallback.  On success
 * returns the buffer (caller must kfree()) and stores its length in
 * *vpdlen; returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Found one: decode its length (in words) and
			 * resolve its NVRAM address.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended VPD: fall back to the fixed location. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Non-EEPROM NVRAM: read the VPD through PCI config
		 * space, retrying up to three times; timeouts and
		 * signals restart the partial read rather than failing.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
10653
10654 #define NVRAM_TEST_SIZE 0x100
10655 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10656 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10657 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10658 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10659 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10660 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10661 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10662 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10663
/* Self-test: verify NVRAM content integrity.  Handles three layouts:
 * standard EEPROM images (CRC32-protected), selfboot firmware images
 * (8-bit additive checksum), and hardware selfboot images (per-byte
 * parity bits).  For EEPROM images it finishes by validating the VPD
 * read-only section checksum.  Returns 0 on success or -EIO/-ENOMEM.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes to read from the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			/* Selfboot format 1: size depends on revision. */
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image into the buffer, 32 bits at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Firmware selfboot image: 8-bit additive checksum over
		 * the image must come out to zero.
		 */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		/* Hardware selfboot image: parity bits are interleaved
		 * with the data at fixed byte offsets (0, 8, 16/17).
		 */
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Bytes 16 and 17 carry 6 + 8 parity bits. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Verify even parity for every data byte. */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally, validate the VPD read-only section checksum. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		/* The RO section must fit inside the VPD buffer. */
		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			/* Sum of all bytes up to and including the RV
			 * checksum byte must be zero.
			 */
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
10842
10843 #define TG3_SERDES_TIMEOUT_SEC  2
10844 #define TG3_COPPER_TIMEOUT_SEC  6
10845
10846 static int tg3_test_link(struct tg3 *tp)
10847 {
10848         int i, max;
10849
10850         if (!netif_running(tp->dev))
10851                 return -ENODEV;
10852
10853         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10854                 max = TG3_SERDES_TIMEOUT_SEC;
10855         else
10856                 max = TG3_COPPER_TIMEOUT_SEC;
10857
10858         for (i = 0; i < max; i++) {
10859                 if (netif_carrier_ok(tp->dev))
10860                         return 0;
10861
10862                 if (msleep_interruptible(1000))
10863                         break;
10864         }
10865
10866         return -EIO;
10867 }
10868
10869 /* Only test the commonly used registers */
10870 static int tg3_test_registers(struct tg3 *tp)
10871 {
10872         int i, is_5705, is_5750;
10873         u32 offset, read_mask, write_mask, val, save_val, read_val;
10874         static struct {
10875                 u16 offset;
10876                 u16 flags;
10877 #define TG3_FL_5705     0x1
10878 #define TG3_FL_NOT_5705 0x2
10879 #define TG3_FL_NOT_5788 0x4
10880 #define TG3_FL_NOT_5750 0x8
10881                 u32 read_mask;
10882                 u32 write_mask;
10883         } reg_tbl[] = {
10884                 /* MAC Control Registers */
10885                 { MAC_MODE, TG3_FL_NOT_5705,
10886                         0x00000000, 0x00ef6f8c },
10887                 { MAC_MODE, TG3_FL_5705,
10888                         0x00000000, 0x01ef6b8c },
10889                 { MAC_STATUS, TG3_FL_NOT_5705,
10890                         0x03800107, 0x00000000 },
10891                 { MAC_STATUS, TG3_FL_5705,
10892                         0x03800100, 0x00000000 },
10893                 { MAC_ADDR_0_HIGH, 0x0000,
10894                         0x00000000, 0x0000ffff },
10895                 { MAC_ADDR_0_LOW, 0x0000,
10896                         0x00000000, 0xffffffff },
10897                 { MAC_RX_MTU_SIZE, 0x0000,
10898                         0x00000000, 0x0000ffff },
10899                 { MAC_TX_MODE, 0x0000,
10900                         0x00000000, 0x00000070 },
10901                 { MAC_TX_LENGTHS, 0x0000,
10902                         0x00000000, 0x00003fff },
10903                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10904                         0x00000000, 0x000007fc },
10905                 { MAC_RX_MODE, TG3_FL_5705,
10906                         0x00000000, 0x000007dc },
10907                 { MAC_HASH_REG_0, 0x0000,
10908                         0x00000000, 0xffffffff },
10909                 { MAC_HASH_REG_1, 0x0000,
10910                         0x00000000, 0xffffffff },
10911                 { MAC_HASH_REG_2, 0x0000,
10912                         0x00000000, 0xffffffff },
10913                 { MAC_HASH_REG_3, 0x0000,
10914                         0x00000000, 0xffffffff },
10915
10916                 /* Receive Data and Receive BD Initiator Control Registers. */
10917                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10918                         0x00000000, 0xffffffff },
10919                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10920                         0x00000000, 0xffffffff },
10921                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10922                         0x00000000, 0x00000003 },
10923                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10924                         0x00000000, 0xffffffff },
10925                 { RCVDBDI_STD_BD+0, 0x0000,
10926                         0x00000000, 0xffffffff },
10927                 { RCVDBDI_STD_BD+4, 0x0000,
10928                         0x00000000, 0xffffffff },
10929                 { RCVDBDI_STD_BD+8, 0x0000,
10930                         0x00000000, 0xffff0002 },
10931                 { RCVDBDI_STD_BD+0xc, 0x0000,
10932                         0x00000000, 0xffffffff },
10933
10934                 /* Receive BD Initiator Control Registers. */
10935                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10936                         0x00000000, 0xffffffff },
10937                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10938                         0x00000000, 0x000003ff },
10939                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10940                         0x00000000, 0xffffffff },
10941
10942                 /* Host Coalescing Control Registers. */
10943                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10944                         0x00000000, 0x00000004 },
10945                 { HOSTCC_MODE, TG3_FL_5705,
10946                         0x00000000, 0x000000f6 },
10947                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10948                         0x00000000, 0xffffffff },
10949                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10950                         0x00000000, 0x000003ff },
10951                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10952                         0x00000000, 0xffffffff },
10953                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10954                         0x00000000, 0x000003ff },
10955                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10956                         0x00000000, 0xffffffff },
10957                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10958                         0x00000000, 0x000000ff },
10959                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10960                         0x00000000, 0xffffffff },
10961                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10962                         0x00000000, 0x000000ff },
10963                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10964                         0x00000000, 0xffffffff },
10965                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10966                         0x00000000, 0xffffffff },
10967                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10968                         0x00000000, 0xffffffff },
10969                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10970                         0x00000000, 0x000000ff },
10971                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10972                         0x00000000, 0xffffffff },
10973                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10974                         0x00000000, 0x000000ff },
10975                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10976                         0x00000000, 0xffffffff },
10977                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10978                         0x00000000, 0xffffffff },
10979                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10980                         0x00000000, 0xffffffff },
10981                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10982                         0x00000000, 0xffffffff },
10983                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10984                         0x00000000, 0xffffffff },
10985                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10986                         0xffffffff, 0x00000000 },
10987                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10988                         0xffffffff, 0x00000000 },
10989
10990                 /* Buffer Manager Control Registers. */
10991                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10992                         0x00000000, 0x007fff80 },
10993                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10994                         0x00000000, 0x007fffff },
10995                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10996                         0x00000000, 0x0000003f },
10997                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10998                         0x00000000, 0x000001ff },
10999                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11000                         0x00000000, 0x000001ff },
11001                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11002                         0xffffffff, 0x00000000 },
11003                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11004                         0xffffffff, 0x00000000 },
11005
11006                 /* Mailbox Registers */
11007                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11008                         0x00000000, 0x000001ff },
11009                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11010                         0x00000000, 0x000001ff },
11011                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11012                         0x00000000, 0x000007ff },
11013                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11014                         0x00000000, 0x000001ff },
11015
11016                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11017         };
11018
11019         is_5705 = is_5750 = 0;
11020         if (tg3_flag(tp, 5705_PLUS)) {
11021                 is_5705 = 1;
11022                 if (tg3_flag(tp, 5750_PLUS))
11023                         is_5750 = 1;
11024         }
11025
11026         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11027                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11028                         continue;
11029
11030                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11031                         continue;
11032
11033                 if (tg3_flag(tp, IS_5788) &&
11034                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11035                         continue;
11036
11037                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11038                         continue;
11039
11040                 offset = (u32) reg_tbl[i].offset;
11041                 read_mask = reg_tbl[i].read_mask;
11042                 write_mask = reg_tbl[i].write_mask;
11043
11044                 /* Save the original register content */
11045                 save_val = tr32(offset);
11046
11047                 /* Determine the read-only value. */
11048                 read_val = save_val & read_mask;
11049
11050                 /* Write zero to the register, then make sure the read-only bits
11051                  * are not changed and the read/write bits are all zeros.
11052                  */
11053                 tw32(offset, 0);
11054
11055                 val = tr32(offset);
11056
11057                 /* Test the read-only and read/write bits. */
11058                 if (((val & read_mask) != read_val) || (val & write_mask))
11059                         goto out;
11060
11061                 /* Write ones to all the bits defined by RdMask and WrMask, then
11062                  * make sure the read-only bits are not changed and the
11063                  * read/write bits are all ones.
11064                  */
11065                 tw32(offset, read_mask | write_mask);
11066
11067                 val = tr32(offset);
11068
11069                 /* Test the read-only bits. */
11070                 if ((val & read_mask) != read_val)
11071                         goto out;
11072
11073                 /* Test the read/write bits. */
11074                 if ((val & write_mask) != write_mask)
11075                         goto out;
11076
11077                 tw32(offset, save_val);
11078         }
11079
11080         return 0;
11081
11082 out:
11083         if (netif_msg_hw(tp))
11084                 netdev_err(tp->dev,
11085                            "Register test failed at offset %x\n", offset);
11086         tw32(offset, save_val);
11087         return -EIO;
11088 }
11089
11090 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11091 {
11092         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11093         int i;
11094         u32 j;
11095
11096         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11097                 for (j = 0; j < len; j += 4) {
11098                         u32 val;
11099
11100                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11101                         tg3_read_mem(tp, offset + j, &val);
11102                         if (val != test_pattern[i])
11103                                 return -EIO;
11104                 }
11105         }
11106         return 0;
11107 }
11108
11109 static int tg3_test_memory(struct tg3 *tp)
11110 {
11111         static struct mem_entry {
11112                 u32 offset;
11113                 u32 len;
11114         } mem_tbl_570x[] = {
11115                 { 0x00000000, 0x00b50},
11116                 { 0x00002000, 0x1c000},
11117                 { 0xffffffff, 0x00000}
11118         }, mem_tbl_5705[] = {
11119                 { 0x00000100, 0x0000c},
11120                 { 0x00000200, 0x00008},
11121                 { 0x00004000, 0x00800},
11122                 { 0x00006000, 0x01000},
11123                 { 0x00008000, 0x02000},
11124                 { 0x00010000, 0x0e000},
11125                 { 0xffffffff, 0x00000}
11126         }, mem_tbl_5755[] = {
11127                 { 0x00000200, 0x00008},
11128                 { 0x00004000, 0x00800},
11129                 { 0x00006000, 0x00800},
11130                 { 0x00008000, 0x02000},
11131                 { 0x00010000, 0x0c000},
11132                 { 0xffffffff, 0x00000}
11133         }, mem_tbl_5906[] = {
11134                 { 0x00000200, 0x00008},
11135                 { 0x00004000, 0x00400},
11136                 { 0x00006000, 0x00400},
11137                 { 0x00008000, 0x01000},
11138                 { 0x00010000, 0x01000},
11139                 { 0xffffffff, 0x00000}
11140         }, mem_tbl_5717[] = {
11141                 { 0x00000200, 0x00008},
11142                 { 0x00010000, 0x0a000},
11143                 { 0x00020000, 0x13c00},
11144                 { 0xffffffff, 0x00000}
11145         }, mem_tbl_57765[] = {
11146                 { 0x00000200, 0x00008},
11147                 { 0x00004000, 0x00800},
11148                 { 0x00006000, 0x09800},
11149                 { 0x00010000, 0x0a000},
11150                 { 0xffffffff, 0x00000}
11151         };
11152         struct mem_entry *mem_tbl;
11153         int err = 0;
11154         int i;
11155
11156         if (tg3_flag(tp, 5717_PLUS))
11157                 mem_tbl = mem_tbl_5717;
11158         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11159                 mem_tbl = mem_tbl_57765;
11160         else if (tg3_flag(tp, 5755_PLUS))
11161                 mem_tbl = mem_tbl_5755;
11162         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11163                 mem_tbl = mem_tbl_5906;
11164         else if (tg3_flag(tp, 5705_PLUS))
11165                 mem_tbl = mem_tbl_5705;
11166         else
11167                 mem_tbl = mem_tbl_570x;
11168
11169         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11170                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11171                 if (err)
11172                         break;
11173         }
11174
11175         return err;
11176 }
11177
/* Loopback test modes accepted by tg3_run_loopback(). */
#define TG3_MAC_LOOPBACK        0
#define TG3_PHY_LOOPBACK        1
#define TG3_TSO_LOOPBACK        2

/* MSS programmed into the TSO loopback test frame. */
#define TG3_TSO_MSS             500

/* Header sizes (bytes) matching the canned tg3_tso_header template:
 * a 20-byte IP header and a 20-byte TCP header with 12 bytes of options.
 */
#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12
11187
/* Canned frame template for the TSO loopback test: a 2-byte EtherType
 * followed by a 20-byte IPv4 header and a 32-byte TCP header (20 bytes
 * plus 12 bytes of options).  tg3_run_loopback() copies this right after
 * the two MAC addresses and then patches the IP total-length field (and,
 * for HW TSO chips, zeroes the TCP checksum) at run time.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,				/* EtherType 0x0800: IPv4 */
0x45, 0x00, 0x00, 0x00,			/* IP: ver 4/IHL 5, TOS, tot_len (patched) */
0x00, 0x00, 0x40, 0x00,			/* IP: id, flags (DF) + frag offset */
0x40, 0x06, 0x00, 0x00,			/* IP: TTL 64, proto 6 (TCP), checksum */
0x0a, 0x00, 0x00, 0x01,			/* IP: src 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,			/* IP: dst 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,			/* TCP: source/dest ports */
0x00, 0x00, 0x01, 0x00,			/* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,			/* TCP: ack number */
0x80, 0x10, 0x10, 0x00,			/* TCP: data offset 8 words, flags, window */
0x14, 0x09, 0x00, 0x00,			/* TCP: checksum, urgent pointer */
0x01, 0x01, 0x08, 0x0a,			/* TCP options: NOP, NOP, timestamp */
0x11, 0x11, 0x11, 0x11,			/* TCP options: timestamp value */
0x11, 0x11, 0x11, 0x11,			/* TCP options: timestamp echo reply */
};
11204
/* Run one loopback test iteration.
 *
 * @tp:            device state
 * @pktsz:         total length of the frame to transmit
 * @loopback_mode: TG3_MAC_LOOPBACK, TG3_PHY_LOOPBACK or TG3_TSO_LOOPBACK
 *
 * Configures the MAC (or the PHY, for PHY/TSO modes) for internal
 * loopback, builds and transmits a test frame, then polls the status
 * block until the frame has been consumed on the tx side and reappears
 * on the rx side, and finally verifies the received payload byte by
 * byte against what was sent.
 *
 * Returns 0 on success, -ENOMEM if the skb cannot be allocated, and
 * -EIO on DMA-mapping failure, tx ring failure, timeout or payload
 * mismatch.  Must be called with the device quiesced for testing.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* With RSS/TSS enabled the first data queue is napi[1], so pick
	 * the vector that will actually carry the test traffic.
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.  Also, the MAC loopback test is deprecated for
		 * all newer ASIC revisions.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
		    tg3_flag(tp, CPMU_PRESENT))
			return 0;

		/* Loop frames back inside the MAC itself. */
		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		if (!tg3_flag(tp, 5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else {
		/* PHY/TSO loopback: put the PHY itself into loopback at
		 * its highest supported speed, full duplex.
		 */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			/* Force the FET PHY to report tx link/lock. */
			tg3_writephy(tp, MII_TG3_FET_PTEST,
				     MII_TG3_FET_PTEST_FRC_TX_LINK |
				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
			/* The write needs to be flushed for the AC131 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* 5401/5411 PHYs need opposite link-polarity
			 * settings on the 5700.
			 */
			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
			if (masked_phy_id == TG3_PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}
	}

	err = -EIO;

	/* Build the test frame: our own MAC as destination, zeroed
	 * source + EtherType, then a recognizable payload.
	 */
	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (loopback_mode == TG3_TSO_LOOPBACK) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		/* Splice the canned IP+TCP header after the MAC addrs. */
		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* Hardware will segment the payload into MSS-sized
		 * packets; that is how many frames we expect back.
		 */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* HW TSO chips compute the TCP checksum; it
			 * must start out zeroed.
			 */
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Each HW TSO generation encodes the header length into
		 * the mss/flags descriptor fields differently.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	/* Fill the payload with a predictable byte pattern so the rx
	 * side can be verified byte-for-byte.
	 */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	/* Force a coalescing event so the status block is current
	 * before we snapshot the rx producer index.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	if (tg3_tx_frag_set(tnapi, tnapi->tx_prod, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Kick the hardware tx doorbell and flush with a readback. */
	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
	dev_kfree_skb(skb);

	/* Tx was never consumed, or not all packets came back: fail. */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk each new rx return-ring entry and verify its payload
	 * picks up exactly where the previous packet left off.
	 */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (loopback_mode != TG3_TSO_LOOPBACK) {
			/* Non-TSO: one frame, same length, and it must
			 * arrive on the expected producer ring.
			 */
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			/* TSO: hardware-computed checksum must be valid. */
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		/* Compare payload against the pattern written above. */
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_skb->data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
11472
/* Per-packet-size failure bits returned by tg3_test_loopback(). */
#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4

/* MAC-loopback failures occupy bits 0-3 of the result; PHY-loopback
 * failures occupy bits 4-7.  TG3_LOOPBACK_FAILED sets every defined bit.
 */
#define TG3_MAC_LOOPBACK_SHIFT          0
#define TG3_PHY_LOOPBACK_SHIFT          4
#define TG3_LOOPBACK_FAILED             0x00000077
11480
/* Run the full loopback test suite (MAC and PHY modes, standard/jumbo/
 * TSO packet sizes) via tg3_run_loopback().
 *
 * Resets the hardware first, temporarily disables EEE, gphy
 * auto-powerdown and (on CPMU chips) link-based power management so
 * they cannot interfere with the test, and restores them afterwards.
 *
 * Returns 0 on success, or a bitmask of TG3_*_LOOPBACK_FAILED bits
 * shifted by TG3_MAC/PHY_LOOPBACK_SHIFT (TG3_LOOPBACK_FAILED if the
 * device is down or cannot be reset).
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 eee_cap, cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	/* EEE could interfere with loopback; save the flag and mask it. */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	err = tg3_reset_hw(tp, 1);
	if (err) {
		err = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* Turn off gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, false);

	if (tg3_flag(tp, CPMU_PRESENT)) {
		int i;
		u32 status;

		/* Grab the CPMU mutex before touching CPMU_CTRL. */
		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER) {
			err = TG3_LOOPBACK_FAILED;
			goto done;
		}

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	/* MAC-internal loopback, standard and (if enabled) jumbo sizes. */
	if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
		err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
	    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
		err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, CPMU_PRESENT)) {
		/* Restore power management and release the CPMU mutex. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback only makes sense with a copper PHY we drive
	 * ourselves (not SERDES, not phylib-managed).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
			err |= TG3_STD_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
			err |= TG3_TSO_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
			err |= TG3_JMB_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
	}

	/* Re-enable gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

done:
	/* Restore the saved EEE capability flag. */
	tp->phy_flags |= eee_cap;

	return err;
}
11575
/* ethtool self-test entry point (.self_test).
 *
 * @dev:   the network device under test
 * @etest: ethtool test control/result flags; ETH_TEST_FL_FAILED is set
 *         on any failure
 * @data:  per-test result array of TG3_NUM_TEST entries; a non-zero
 *         entry marks that test as failed
 *         ([0] nvram, [1] link, [2] registers, [3] memory,
 *          [4] loopback, [5] interrupt)
 *
 * The nvram and link tests always run.  The remaining (offline) tests
 * run only when ETH_TEST_FL_OFFLINE is requested: the device is halted
 * under the full lock, tested, then reset and restarted.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* A powered-down device must be brought up first; if that
	 * fails, mark every test failed and bail out.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip: halt it and its on-chip CPUs
		 * (5705+ parts have no separate TX CPU).
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* Loopback result is a bitmask; report it verbatim. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs without the full lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the device back to its pre-test state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
11657
11658 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11659 {
11660         struct mii_ioctl_data *data = if_mii(ifr);
11661         struct tg3 *tp = netdev_priv(dev);
11662         int err;
11663
11664         if (tg3_flag(tp, USE_PHYLIB)) {
11665                 struct phy_device *phydev;
11666                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11667                         return -EAGAIN;
11668                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11669                 return phy_mii_ioctl(phydev, ifr, cmd);
11670         }
11671
11672         switch (cmd) {
11673         case SIOCGMIIPHY:
11674                 data->phy_id = tp->phy_addr;
11675
11676                 /* fallthru */
11677         case SIOCGMIIREG: {
11678                 u32 mii_regval;
11679
11680                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11681                         break;                  /* We have no PHY */
11682
11683                 if (!netif_running(dev))
11684                         return -EAGAIN;
11685
11686                 spin_lock_bh(&tp->lock);
11687                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11688                 spin_unlock_bh(&tp->lock);
11689
11690                 data->val_out = mii_regval;
11691
11692                 return err;
11693         }
11694
11695         case SIOCSMIIREG:
11696                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11697                         break;                  /* We have no PHY */
11698
11699                 if (!netif_running(dev))
11700                         return -EAGAIN;
11701
11702                 spin_lock_bh(&tp->lock);
11703                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11704                 spin_unlock_bh(&tp->lock);
11705
11706                 return err;
11707
11708         default:
11709                 /* do nothing */
11710                 break;
11711         }
11712         return -EOPNOTSUPP;
11713 }
11714
11715 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11716 {
11717         struct tg3 *tp = netdev_priv(dev);
11718
11719         memcpy(ec, &tp->coal, sizeof(*ec));
11720         return 0;
11721 }
11722
11723 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11724 {
11725         struct tg3 *tp = netdev_priv(dev);
11726         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11727         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11728
11729         if (!tg3_flag(tp, 5705_PLUS)) {
11730                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11731                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11732                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11733                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11734         }
11735
11736         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11737             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11738             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11739             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11740             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11741             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11742             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11743             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11744             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11745             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11746                 return -EINVAL;
11747
11748         /* No rx interrupts will be generated if both are zero */
11749         if ((ec->rx_coalesce_usecs == 0) &&
11750             (ec->rx_max_coalesced_frames == 0))
11751                 return -EINVAL;
11752
11753         /* No tx interrupts will be generated if both are zero */
11754         if ((ec->tx_coalesce_usecs == 0) &&
11755             (ec->tx_max_coalesced_frames == 0))
11756                 return -EINVAL;
11757
11758         /* Only copy relevant parameters, ignore all others. */
11759         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11760         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11761         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11762         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11763         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11764         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11765         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11766         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11767         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11768
11769         if (netif_running(dev)) {
11770                 tg3_full_lock(tp, 0);
11771                 __tg3_set_coalesce(tp, &tp->coal);
11772                 tg3_full_unlock(tp);
11773         }
11774         return 0;
11775 }
11776
/* ethtool operations table registered via dev->ethtool_ops; maps the
 * generic ethtool requests onto the tg3_* handlers above.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
11804
11805 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11806 {
11807         u32 cursize, val, magic;
11808
11809         tp->nvram_size = EEPROM_CHIP_SIZE;
11810
11811         if (tg3_nvram_read(tp, 0, &magic) != 0)
11812                 return;
11813
11814         if ((magic != TG3_EEPROM_MAGIC) &&
11815             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11816             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11817                 return;
11818
11819         /*
11820          * Size the chip by reading offsets at increasing powers of two.
11821          * When we encounter our validation signature, we know the addressing
11822          * has wrapped around, and thus have our chip size.
11823          */
11824         cursize = 0x10;
11825
11826         while (cursize < tp->nvram_size) {
11827                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11828                         return;
11829
11830                 if (val == magic)
11831                         break;
11832
11833                 cursize <<= 1;
11834         }
11835
11836         tp->nvram_size = cursize;
11837 }
11838
11839 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11840 {
11841         u32 val;
11842
11843         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11844                 return;
11845
11846         /* Selfboot format */
11847         if (val != TG3_EEPROM_MAGIC) {
11848                 tg3_get_eeprom_size(tp);
11849                 return;
11850         }
11851
11852         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11853                 if (val != 0) {
11854                         /* This is confusing.  We want to operate on the
11855                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11856                          * call will read from NVRAM and byteswap the data
11857                          * according to the byteswapping settings for all
11858                          * other register accesses.  This ensures the data we
11859                          * want will always reside in the lower 16-bits.
11860                          * However, the data in NVRAM is in LE format, which
11861                          * means the data from the NVRAM read will always be
11862                          * opposite the endianness of the CPU.  The 16-bit
11863                          * byteswap then brings the data to CPU endianness.
11864                          */
11865                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11866                         return;
11867                 }
11868         }
11869         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11870 }
11871
/* Decode NVRAM_CFG1 for 5750 / 5780-class chips: record the flash
 * vendor (JEDEC id), page size, and whether the part is buffered.
 * Older chips fall through to the buffered-Atmel default.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: disable compatibility bypass so the
		 * EEPROM is accessed through the normal NVRAM machinery. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Pre-5750 chips: assume a buffered Atmel AT45DB0X1B. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
11922
11923 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11924 {
11925         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11926         case FLASH_5752PAGE_SIZE_256:
11927                 tp->nvram_pagesize = 256;
11928                 break;
11929         case FLASH_5752PAGE_SIZE_512:
11930                 tp->nvram_pagesize = 512;
11931                 break;
11932         case FLASH_5752PAGE_SIZE_1K:
11933                 tp->nvram_pagesize = 1024;
11934                 break;
11935         case FLASH_5752PAGE_SIZE_2K:
11936                 tp->nvram_pagesize = 2048;
11937                 break;
11938         case FLASH_5752PAGE_SIZE_4K:
11939                 tp->nvram_pagesize = 4096;
11940                 break;
11941         case FLASH_5752PAGE_SIZE_264:
11942                 tp->nvram_pagesize = 264;
11943                 break;
11944         case FLASH_5752PAGE_SIZE_528:
11945                 tp->nvram_pagesize = 528;
11946                 break;
11947         }
11948 }
11949
/* Decode NVRAM_CFG1 for 5752 chips: vendor, buffering, flash-vs-eeprom,
 * and page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	/* Bit 27 set means the NVRAM is write-protected for TPM use. */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
11990
/* Decode NVRAM_CFG1 for 5755 chips.  In addition to vendor/page size,
 * the NVRAM size depends on the specific part and on whether TPM
 * protection reserves the top of the device.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	/* Bit 27 set means the top of NVRAM is reserved/protected. */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* Protected sizes are the usable span below the TPM area. */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		/* With protection enabled, only half of each ST part is usable. */
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
12046
/* Decode NVRAM_CFG1 for 5787/5784/5785 chips: vendor, buffering,
 * flash-vs-eeprom, and page size.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM path: route accesses through the NVRAM engine. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;	/* Atmel DataFlash geometry */
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
12084
/* Decode NVRAM_CFG1 for 5761 chips.  When TPM protection is active the
 * usable size is read back from the NVRAM_ADDR_LOCKOUT register;
 * otherwise it is inferred from the specific flash part.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	/* Bit 27 set means the top of NVRAM is reserved/protected. */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* These Atmel parts use linear addressing, no page translation. */
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Hardware reports the accessible size when protected. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
12159
12160 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12161 {
12162         tp->nvram_jedecnum = JEDEC_ATMEL;
12163         tg3_flag_set(tp, NVRAM_BUFFERED);
12164         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12165 }
12166
/* Decode NVRAM_CFG1 for 57780 chips: vendor, size, and page geometry.
 * Unknown vendor codes mark the device as having no usable NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM path: no flash page-size probing needed. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific Atmel part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific ST part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte DataFlash pages need address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12238
12239
/* Decode NVRAM_CFG1 for 5717 chips: vendor, size, and page geometry.
 * Unknown vendor codes mark the device as having no usable NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM path: no flash page-size probing needed. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte DataFlash pages need address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12317
/* Decode NVRAM_CFG1 for 5720 chips: vendor, size, and page geometry.
 * Unknown vendor codes mark the device as having no usable NVRAM.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		/* EEPROM path: route accesses through the NVRAM engine. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		/* HD (high-density) vs LD (low-density) EEPROM geometry. */
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific Atmel part. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific ST part. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte DataFlash pages need address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12429
12430 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12431 static void __devinit tg3_nvram_init(struct tg3 *tp)
12432 {
12433         tw32_f(GRC_EEPROM_ADDR,
12434              (EEPROM_ADDR_FSM_RESET |
12435               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12436                EEPROM_ADDR_CLKPERD_SHIFT)));
12437
12438         msleep(1);
12439
12440         /* Enable seeprom accesses. */
12441         tw32_f(GRC_LOCAL_CTRL,
12442              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12443         udelay(100);
12444
12445         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12446             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12447                 tg3_flag_set(tp, NVRAM);
12448
12449                 if (tg3_nvram_lock(tp)) {
12450                         netdev_warn(tp->dev,
12451                                     "Cannot get nvram lock, %s failed\n",
12452                                     __func__);
12453                         return;
12454                 }
12455                 tg3_enable_nvram_access(tp);
12456
12457                 tp->nvram_size = 0;
12458
12459                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12460                         tg3_get_5752_nvram_info(tp);
12461                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12462                         tg3_get_5755_nvram_info(tp);
12463                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12464                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12465                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12466                         tg3_get_5787_nvram_info(tp);
12467                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12468                         tg3_get_5761_nvram_info(tp);
12469                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12470                         tg3_get_5906_nvram_info(tp);
12471                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12472                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12473                         tg3_get_57780_nvram_info(tp);
12474                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12475                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12476                         tg3_get_5717_nvram_info(tp);
12477                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12478                         tg3_get_5720_nvram_info(tp);
12479                 else
12480                         tg3_get_nvram_info(tp);
12481
12482                 if (tp->nvram_size == 0)
12483                         tg3_get_nvram_size(tp);
12484
12485                 tg3_disable_nvram_access(tp);
12486                 tg3_nvram_unlock(tp);
12487
12488         } else {
12489                 tg3_flag_clear(tp, NVRAM);
12490                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12491
12492                 tg3_get_eeprom_size(tp);
12493         }
12494 }
12495
/* Write 'len' bytes from 'buf' to the seeprom at 'offset', one dword at a
 * time, via the GRC EEPROM register interface.  offset and len must be
 * dword aligned (the loop consumes the buffer in 4-byte chunks).
 * Returns 0 on success, -EBUSY if a word write fails to complete.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __be32 data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                /*
                 * The SEEPROM interface expects the data to always be opposite
                 * the native endian format.  We accomplish this by reversing
                 * all the operations that would have been performed on the
                 * data from a call to tg3_nvram_read_be32().
                 */
                tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

                /* Ack any prior completion before starting this write. */
                val = tr32(GRC_EEPROM_ADDR);
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                /* Compose the write command: clear address/devid/read bits,
                 * then set the target address and START|WRITE.
                 */
                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll up to ~1 s (1000 x 1 ms) for the word to complete. */
                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
12544
/* offset and length are dword aligned */
/* Write to flash that has no internal page buffer: for each page touched,
 * read the whole page, merge in the caller's data, erase the page, and
 * program it back dword by dword.  Always issues a final WRDI (write
 * disable) before returning.  Returns 0 or a negative errno.
 *
 * NOTE(review): 'buf' is never advanced inside the while loop, so a write
 * spanning more than one flash page appears to re-merge the first 'size'
 * bytes of 'buf' into every subsequent page — verify against callers
 * whether multi-page writes can reach this path.
 * NOTE(review): 'pagemask' assumes tp->nvram_pagesize is a power of two.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int ret = 0;
        u32 pagesize = tp->nvram_pagesize;
        u32 pagemask = pagesize - 1;
        u32 nvram_cmd;
        u8 *tmp;

        /* Scratch buffer holding one full flash page for read-modify-write. */
        tmp = kmalloc(pagesize, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        while (len) {
                int j;
                u32 phy_addr, page_off, size;

                /* Start address of the page containing 'offset'. */
                phy_addr = offset & ~pagemask;

                /* Fetch the existing page contents. */
                for (j = 0; j < pagesize; j += 4) {
                        ret = tg3_nvram_read_be32(tp, phy_addr + j,
                                                  (__be32 *) (tmp + j));
                        if (ret)
                                break;
                }
                if (ret)
                        break;

                /* Merge the caller's data into the page image. */
                page_off = offset & pagemask;
                size = pagesize;
                if (len < size)
                        size = len;

                len -= size;

                memcpy(tmp + page_off, buf, size);

                /* Advance to the start of the next page. */
                offset = offset + (pagesize - page_off);

                tg3_enable_nvram_access(tp);

                /*
                 * Before we can erase the flash page, we need
                 * to issue a special "write enable" command.
                 */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Erase the target page */
                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
                        NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Issue another write enable to start the write. */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Program the merged page image back, one dword at a time,
                 * marking the first and last words of the burst.
                 */
                for (j = 0; j < pagesize; j += 4) {
                        __be32 data;

                        data = *((__be32 *) (tmp + j));

                        tw32(NVRAM_WRDATA, be32_to_cpu(data));

                        tw32(NVRAM_ADDR, phy_addr + j);

                        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
                                NVRAM_CMD_WR;

                        if (j == 0)
                                nvram_cmd |= NVRAM_CMD_FIRST;
                        else if (j == (pagesize - 4))
                                nvram_cmd |= NVRAM_CMD_LAST;

                        if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                                break;
                }
                if (ret)
                        break;
        }

        /* Re-assert write protection regardless of success or failure. */
        nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
        tg3_nvram_exec_cmd(tp, nvram_cmd);

        kfree(tmp);

        return ret;
}
12642
/* offset and length are dword aligned */
/* Write to buffered flash (or seeprom): stream dwords into the NVRAM data
 * register, letting the device's internal page buffer absorb them; the
 * FIRST/LAST command bits delimit each page burst.  Returns 0 or the
 * first error from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int i, ret = 0;

        for (i = 0; i < len; i += 4, offset += 4) {
                u32 page_off, phy_addr, nvram_cmd;
                __be32 data;

                /* Load the next dword into the write-data register. */
                memcpy(&data, buf + i, 4);
                tw32(NVRAM_WRDATA, be32_to_cpu(data));

                page_off = offset % tp->nvram_pagesize;

                /* Translate the linear offset to the device's addressing. */
                phy_addr = tg3_nvram_phys_addr(tp, offset);

                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

                /* First word of a page (or of the whole transfer) starts a
                 * burst; last word of a page (or of the transfer) ends it.
                 */
                if (page_off == 0 || i == 0)
                        nvram_cmd |= NVRAM_CMD_FIRST;
                if (page_off == (tp->nvram_pagesize - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if (i == (len - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                /* ST parts on older ASICs need an explicit write-enable
                 * before each page burst.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
                    !tg3_flag(tp, 5755_PLUS) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {

                        if ((ret = tg3_nvram_exec_cmd(tp,
                                NVRAM_CMD_WREN | NVRAM_CMD_GO |
                                NVRAM_CMD_DONE)))

                                break;
                }
                if (!tg3_flag(tp, FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }

                if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                        break;
        }
        return ret;
}
12693
12694 /* offset and length are dword aligned */
12695 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12696 {
12697         int ret;
12698
12699         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12700                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12701                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12702                 udelay(40);
12703         }
12704
12705         if (!tg3_flag(tp, NVRAM)) {
12706                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12707         } else {
12708                 u32 grc_mode;
12709
12710                 ret = tg3_nvram_lock(tp);
12711                 if (ret)
12712                         return ret;
12713
12714                 tg3_enable_nvram_access(tp);
12715                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12716                         tw32(NVRAM_WRITE1, 0x406);
12717
12718                 grc_mode = tr32(GRC_MODE);
12719                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12720
12721                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12722                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12723                                 buf);
12724                 } else {
12725                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12726                                 buf);
12727                 }
12728
12729                 grc_mode = tr32(GRC_MODE);
12730                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12731
12732                 tg3_disable_nvram_access(tp);
12733                 tg3_nvram_unlock(tp);
12734         }
12735
12736         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12737                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12738                 udelay(40);
12739         }
12740
12741         return ret;
12742 }
12743
/* Maps one PCI subsystem (vendor, device) ID pair to the PHY ID expected
 * on that board.  Consumed by tg3_lookup_by_subsys() when no usable PHY
 * information is found elsewhere.
 */
struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;        /* PCI subsystem IDs to match */
        u32 phy_id;     /* TG3_PHY_ID_*; 0 is treated as serdes by the caller */
};
12748
/* Hardcoded board table used as a last resort by tg3_phy_probe() when the
 * chip's PHY ID register and the eeprom both fail to identify the PHY.
 * A phy_id of 0 makes the caller mark the port as serdes (fiber).
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
        /* Broadcom boards. */
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

        /* 3com boards. */
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

        /* DELL boards. */
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

        /* Compaq boards. */
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

        /* IBM boards. */
        { TG3PCI_SUBVENDOR_ID_IBM,
          TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
12812
12813 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12814 {
12815         int i;
12816
12817         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12818                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12819                      tp->pdev->subsystem_vendor) &&
12820                     (subsys_id_to_phy_id[i].subsys_devid ==
12821                      tp->pdev->subsystem_device))
12822                         return &subsys_id_to_phy_id[i];
12823         }
12824         return NULL;
12825 }
12826
/* Latch the boot-time hardware configuration into *tp: PHY ID, LED mode,
 * write-protect/NIC status, WOL/ASF/APE capabilities, and assorted
 * workaround flags.  On 5906 the config comes from the VCPU shadow
 * register; otherwise it is read from NIC SRAM, but only if the SRAM
 * signature magic is present.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
        u32 val;

        tp->phy_id = TG3_PHY_ID_INVALID;
        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

        /* Assume an onboard device and WOL capable by default.  */
        tg3_flag_set(tp, EEPROM_WRITE_PROT);
        tg3_flag_set(tp, WOL_CAP);

        /* 5906: configuration lives in the VCPU shadow register, not SRAM. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }
                val = tr32(VCPU_CFGSHDW);
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
                        tg3_flag_set(tp, ASPM_WORKAROUND);
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
                    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }
                goto done;
        }

        /* Trust the NIC SRAM config only when the signature magic matches. */
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg, led_cfg;
                u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
                int eeprom_phy_serdes = 0;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                tp->nic_sram_data_cfg = nic_cfg;

                /* cfg2 is only valid on some ASICs and bootcode versions. */
                tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
                ver >>= NIC_SRAM_DATA_VER_SHIFT;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
                    (ver > 0) && (ver < 0x100))
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

                if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
                    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
                        eeprom_phy_serdes = 1;

                /* Reassemble the 32-bit PHY ID from the packed SRAM word. */
                tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
                if (nic_phy_id != 0) {
                        u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
                        u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

                        eeprom_phy_id  = (id1 >> 16) << 10;
                        eeprom_phy_id |= (id2 & 0xfc00) << 16;
                        eeprom_phy_id |= (id2 & 0x03ff) <<  0;
                } else
                        eeprom_phy_id = 0;

                tp->phy_id = eeprom_phy_id;
                if (eeprom_phy_serdes) {
                        if (!tg3_flag(tp, 5705_PLUS))
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                        else
                                tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
                }

                /* Translate the bootcode LED mode into LED_CTRL_MODE_*. */
                if (tg3_flag(tp, 5750_PLUS))
                        led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                                    SHASTA_EXT_LED_MODE_MASK);
                else
                        led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

                switch (led_cfg) {
                default:
                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_MAC;

                        /* Default to PHY_1_MODE if 0 (MAC_MODE) is
                         * read on some older 5700/5701 bootcode.
                         */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5701)
                                tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                        break;

                case SHASTA_EXT_LED_SHARED:
                        tp->led_ctrl = LED_CTRL_MODE_SHARED;
                        if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
                            tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                case SHASTA_EXT_LED_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
                        break;

                case SHASTA_EXT_LED_COMBO:
                        tp->led_ctrl = LED_CTRL_MODE_COMBO;
                        if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                }

                /* Board-specific LED overrides. */
                if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;

                if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                /* Write-protect / onboard-vs-NIC status from bootcode. */
                if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
                        tg3_flag_set(tp, EEPROM_WRITE_PROT);
                        if ((tp->pdev->subsystem_vendor ==
                             PCI_VENDOR_ID_ARIMA) &&
                            (tp->pdev->subsystem_device == 0x205a ||
                             tp->pdev->subsystem_device == 0x2063))
                                tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                } else {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }

                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }

                if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
                    tg3_flag(tp, 5750_PLUS))
                        tg3_flag_set(tp, ENABLE_APE);

                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
                    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
                        tg3_flag_clear(tp, WOL_CAP);

                if (tg3_flag(tp, WOL_CAP) &&
                    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }

                /* cfg2 bit 17: capacitively coupled PHY. */
                if (cfg2 & (1 << 17))
                        tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

                /* serdes signal pre-emphasis in register 0x590 set by */
                /* bootcode if bit 18 is set */
                if (cfg2 & (1 << 18))
                        tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

                if ((tg3_flag(tp, 57765_PLUS) ||
                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
                        tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

                if (tg3_flag(tp, PCI_EXPRESS) &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS)) {
                        u32 cfg3;

                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
                        if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
                                tg3_flag_set(tp, ASPM_WORKAROUND);
                }

                if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
                        tg3_flag_set(tp, RGMII_INBAND_DISABLE);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
        }
done:
        /* Propagate the final WOL capability/enable state to the PM core. */
        if (tg3_flag(tp, WOL_CAP))
                device_set_wakeup_enable(&tp->pdev->dev,
                                         tg3_flag(tp, WOL_ENABLE));
        else
                device_set_wakeup_capable(&tp->pdev->dev, false);
}
13026
13027 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13028 {
13029         int i;
13030         u32 val;
13031
13032         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13033         tw32(OTP_CTRL, cmd);
13034
13035         /* Wait for up to 1 ms for command to execute. */
13036         for (i = 0; i < 100; i++) {
13037                 val = tr32(OTP_STATUS);
13038                 if (val & OTP_STATUS_CMD_DONE)
13039                         break;
13040                 udelay(10);
13041         }
13042
13043         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13044 }
13045
13046 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13047  * configuration is a 32-bit value that straddles the alignment boundary.
13048  * We do two 32-bit reads and then shift and merge the results.
13049  */
13050 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13051 {
13052         u32 bhalf_otp, thalf_otp;
13053
13054         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13055
13056         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13057                 return 0;
13058
13059         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13060
13061         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13062                 return 0;
13063
13064         thalf_otp = tr32(OTP_READ_DATA);
13065
13066         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13067
13068         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13069                 return 0;
13070
13071         bhalf_otp = tr32(OTP_READ_DATA);
13072
13073         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13074 }
13075
13076 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13077 {
13078         u32 adv = ADVERTISED_Autoneg |
13079                   ADVERTISED_Pause;
13080
13081         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13082                 adv |= ADVERTISED_1000baseT_Half |
13083                        ADVERTISED_1000baseT_Full;
13084
13085         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13086                 adv |= ADVERTISED_100baseT_Half |
13087                        ADVERTISED_100baseT_Full |
13088                        ADVERTISED_10baseT_Half |
13089                        ADVERTISED_10baseT_Full |
13090                        ADVERTISED_TP;
13091         else
13092                 adv |= ADVERTISED_FIBRE;
13093
13094         tp->link_config.advertising = adv;
13095         tp->link_config.speed = SPEED_INVALID;
13096         tp->link_config.duplex = DUPLEX_INVALID;
13097         tp->link_config.autoneg = AUTONEG_ENABLE;
13098         tp->link_config.active_speed = SPEED_INVALID;
13099         tp->link_config.active_duplex = DUPLEX_INVALID;
13100         tp->link_config.orig_speed = SPEED_INVALID;
13101         tp->link_config.orig_duplex = DUPLEX_INVALID;
13102         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13103 }
13104
13105 static int __devinit tg3_phy_probe(struct tg3 *tp)
13106 {
13107         u32 hw_phy_id_1, hw_phy_id_2;
13108         u32 hw_phy_id, hw_phy_id_masked;
13109         int err;
13110
13111         /* flow control autonegotiation is default behavior */
13112         tg3_flag_set(tp, PAUSE_AUTONEG);
13113         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13114
13115         if (tg3_flag(tp, USE_PHYLIB))
13116                 return tg3_phy_init(tp);
13117
13118         /* Reading the PHY ID register can conflict with ASF
13119          * firmware access to the PHY hardware.
13120          */
13121         err = 0;
13122         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13123                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13124         } else {
13125                 /* Now read the physical PHY_ID from the chip and verify
13126                  * that it is sane.  If it doesn't look good, we fall back
13127                  * to either the hard-coded table based PHY_ID and failing
13128                  * that the value found in the eeprom area.
13129                  */
13130                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13131                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13132
13133                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13134                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13135                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13136
13137                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13138         }
13139
13140         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13141                 tp->phy_id = hw_phy_id;
13142                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13143                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13144                 else
13145                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13146         } else {
13147                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13148                         /* Do nothing, phy ID already set up in
13149                          * tg3_get_eeprom_hw_cfg().
13150                          */
13151                 } else {
13152                         struct subsys_tbl_ent *p;
13153
13154                         /* No eeprom signature?  Try the hardcoded
13155                          * subsys device table.
13156                          */
13157                         p = tg3_lookup_by_subsys(tp);
13158                         if (!p)
13159                                 return -ENODEV;
13160
13161                         tp->phy_id = p->phy_id;
13162                         if (!tp->phy_id ||
13163                             tp->phy_id == TG3_PHY_ID_BCM8002)
13164                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13165                 }
13166         }
13167
13168         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13169             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13170              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13171              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13172               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13173              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13174               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13175                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13176
13177         tg3_phy_init_link_config(tp);
13178
13179         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13180             !tg3_flag(tp, ENABLE_APE) &&
13181             !tg3_flag(tp, ENABLE_ASF)) {
13182                 u32 bmsr, mask;
13183
13184                 tg3_readphy(tp, MII_BMSR, &bmsr);
13185                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13186                     (bmsr & BMSR_LSTATUS))
13187                         goto skip_phy_reset;
13188
13189                 err = tg3_phy_reset(tp);
13190                 if (err)
13191                         return err;
13192
13193                 tg3_phy_set_wirespeed(tp);
13194
13195                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13196                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13197                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13198                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13199                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13200                                             tp->link_config.flowctrl);
13201
13202                         tg3_writephy(tp, MII_BMCR,
13203                                      BMCR_ANENABLE | BMCR_ANRESTART);
13204                 }
13205         }
13206
13207 skip_phy_reset:
13208         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13209                 err = tg3_init_5401phy_dsp(tp);
13210                 if (err)
13211                         return err;
13212
13213                 err = tg3_init_5401phy_dsp(tp);
13214         }
13215
13216         return err;
13217 }
13218
13219 static void __devinit tg3_read_vpd(struct tg3 *tp)
13220 {
13221         u8 *vpd_data;
13222         unsigned int block_end, rosize, len;
13223         u32 vpdlen;
13224         int j, i = 0;
13225
13226         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13227         if (!vpd_data)
13228                 goto out_no_vpd;
13229
13230         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13231         if (i < 0)
13232                 goto out_not_found;
13233
13234         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13235         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13236         i += PCI_VPD_LRDT_TAG_SIZE;
13237
13238         if (block_end > vpdlen)
13239                 goto out_not_found;
13240
13241         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13242                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13243         if (j > 0) {
13244                 len = pci_vpd_info_field_size(&vpd_data[j]);
13245
13246                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13247                 if (j + len > block_end || len != 4 ||
13248                     memcmp(&vpd_data[j], "1028", 4))
13249                         goto partno;
13250
13251                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13252                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13253                 if (j < 0)
13254                         goto partno;
13255
13256                 len = pci_vpd_info_field_size(&vpd_data[j]);
13257
13258                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13259                 if (j + len > block_end)
13260                         goto partno;
13261
13262                 memcpy(tp->fw_ver, &vpd_data[j], len);
13263                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13264         }
13265
13266 partno:
13267         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13268                                       PCI_VPD_RO_KEYWORD_PARTNO);
13269         if (i < 0)
13270                 goto out_not_found;
13271
13272         len = pci_vpd_info_field_size(&vpd_data[i]);
13273
13274         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13275         if (len > TG3_BPN_SIZE ||
13276             (len + i) > vpdlen)
13277                 goto out_not_found;
13278
13279         memcpy(tp->board_part_number, &vpd_data[i], len);
13280
13281 out_not_found:
13282         kfree(vpd_data);
13283         if (tp->board_part_number[0])
13284                 return;
13285
13286 out_no_vpd:
13287         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13288                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13289                         strcpy(tp->board_part_number, "BCM5717");
13290                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13291                         strcpy(tp->board_part_number, "BCM5718");
13292                 else
13293                         goto nomatch;
13294         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13295                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13296                         strcpy(tp->board_part_number, "BCM57780");
13297                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13298                         strcpy(tp->board_part_number, "BCM57760");
13299                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13300                         strcpy(tp->board_part_number, "BCM57790");
13301                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13302                         strcpy(tp->board_part_number, "BCM57788");
13303                 else
13304                         goto nomatch;
13305         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13306                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13307                         strcpy(tp->board_part_number, "BCM57761");
13308                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13309                         strcpy(tp->board_part_number, "BCM57765");
13310                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13311                         strcpy(tp->board_part_number, "BCM57781");
13312                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13313                         strcpy(tp->board_part_number, "BCM57785");
13314                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13315                         strcpy(tp->board_part_number, "BCM57791");
13316                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13317                         strcpy(tp->board_part_number, "BCM57795");
13318                 else
13319                         goto nomatch;
13320         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13321                 strcpy(tp->board_part_number, "BCM95906");
13322         } else {
13323 nomatch:
13324                 strcpy(tp->board_part_number, "none");
13325         }
13326 }
13327
13328 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13329 {
13330         u32 val;
13331
13332         if (tg3_nvram_read(tp, offset, &val) ||
13333             (val & 0xfc000000) != 0x0c000000 ||
13334             tg3_nvram_read(tp, offset + 4, &val) ||
13335             val != 0)
13336                 return 0;
13337
13338         return 1;
13339 }
13340
/* Extract the boot code firmware version from NVRAM and append it to
 * tp->fw_ver.  Newer images embed a 16-byte version string; older ones
 * encode major/minor numbers in a pointer-table word.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc holds the bootcode image offset and word 0x4 its load
	 * address; give up silently if either read fails.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* A 0x0c000000 signature followed by a zero word marks the newer
	 * image layout that carries an embedded version string.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever is already in fw_ver (e.g. the VPD part). */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the 16-byte version string; its pointer is
		 * stored at image offset 8, relative to the load address.
		 */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* Translate the load-address-relative pointer back into an
		 * NVRAM offset, then copy 16 bytes, one word at a time.
		 */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Older layout: the version is packed into the bootcode
		 * revision word of the NVRAM pointer table.
		 */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13392
13393 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13394 {
13395         u32 val, major, minor;
13396
13397         /* Use native endian representation */
13398         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13399                 return;
13400
13401         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13402                 TG3_NVM_HWSB_CFG1_MAJSFT;
13403         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13404                 TG3_NVM_HWSB_CFG1_MINSFT;
13405
13406         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13407 }
13408
13409 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13410 {
13411         u32 offset, major, minor, build;
13412
13413         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13414
13415         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13416                 return;
13417
13418         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13419         case TG3_EEPROM_SB_REVISION_0:
13420                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13421                 break;
13422         case TG3_EEPROM_SB_REVISION_2:
13423                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13424                 break;
13425         case TG3_EEPROM_SB_REVISION_3:
13426                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13427                 break;
13428         case TG3_EEPROM_SB_REVISION_4:
13429                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13430                 break;
13431         case TG3_EEPROM_SB_REVISION_5:
13432                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13433                 break;
13434         case TG3_EEPROM_SB_REVISION_6:
13435                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13436                 break;
13437         default:
13438                 return;
13439         }
13440
13441         if (tg3_nvram_read(tp, offset, &val))
13442                 return;
13443
13444         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13445                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13446         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13447                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13448         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13449
13450         if (minor > 99 || build > 26)
13451                 return;
13452
13453         offset = strlen(tp->fw_ver);
13454         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13455                  " v%d.%02d", major, minor);
13456
13457         if (build > 0) {
13458                 offset = strlen(tp->fw_ver);
13459                 if (offset < TG3_VER_SIZE - 1)
13460                         tp->fw_ver[offset] = 'a' + build - 1;
13461         }
13462 }
13463
13464 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13465 {
13466         u32 val, offset, start;
13467         int i, vlen;
13468
13469         for (offset = TG3_NVM_DIR_START;
13470              offset < TG3_NVM_DIR_END;
13471              offset += TG3_NVM_DIRENT_SIZE) {
13472                 if (tg3_nvram_read(tp, offset, &val))
13473                         return;
13474
13475                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13476                         break;
13477         }
13478
13479         if (offset == TG3_NVM_DIR_END)
13480                 return;
13481
13482         if (!tg3_flag(tp, 5705_PLUS))
13483                 start = 0x08000000;
13484         else if (tg3_nvram_read(tp, offset - 4, &start))
13485                 return;
13486
13487         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13488             !tg3_fw_img_is_valid(tp, offset) ||
13489             tg3_nvram_read(tp, offset + 8, &val))
13490                 return;
13491
13492         offset += val - start;
13493
13494         vlen = strlen(tp->fw_ver);
13495
13496         tp->fw_ver[vlen++] = ',';
13497         tp->fw_ver[vlen++] = ' ';
13498
13499         for (i = 0; i < 4; i++) {
13500                 __be32 v;
13501                 if (tg3_nvram_read_be32(tp, offset, &v))
13502                         return;
13503
13504                 offset += sizeof(v);
13505
13506                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13507                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13508                         break;
13509                 }
13510
13511                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13512                 vlen += sizeof(v);
13513         }
13514 }
13515
13516 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13517 {
13518         int vlen;
13519         u32 apedata;
13520         char *fwtype;
13521
13522         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13523                 return;
13524
13525         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13526         if (apedata != APE_SEG_SIG_MAGIC)
13527                 return;
13528
13529         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13530         if (!(apedata & APE_FW_STATUS_READY))
13531                 return;
13532
13533         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13534
13535         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13536                 tg3_flag_set(tp, APE_HAS_NCSI);
13537                 fwtype = "NCSI";
13538         } else {
13539                 fwtype = "DASH";
13540         }
13541
13542         vlen = strlen(tp->fw_ver);
13543
13544         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13545                  fwtype,
13546                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13547                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13548                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13549                  (apedata & APE_FW_VERSION_BLDMSK));
13550 }
13551
13552 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13553 {
13554         u32 val;
13555         bool vpd_vers = false;
13556
13557         if (tp->fw_ver[0] != 0)
13558                 vpd_vers = true;
13559
13560         if (tg3_flag(tp, NO_NVRAM)) {
13561                 strcat(tp->fw_ver, "sb");
13562                 return;
13563         }
13564
13565         if (tg3_nvram_read(tp, 0, &val))
13566                 return;
13567
13568         if (val == TG3_EEPROM_MAGIC)
13569                 tg3_read_bc_ver(tp);
13570         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13571                 tg3_read_sb_ver(tp, val);
13572         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13573                 tg3_read_hwsb_ver(tp);
13574         else
13575                 return;
13576
13577         if (vpd_vers)
13578                 goto done;
13579
13580         if (tg3_flag(tp, ENABLE_APE)) {
13581                 if (tg3_flag(tp, ENABLE_ASF))
13582                         tg3_read_dash_ver(tp);
13583         } else if (tg3_flag(tp, ENABLE_ASF)) {
13584                 tg3_read_mgmtfw_ver(tp);
13585         }
13586
13587 done:
13588         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13589 }
13590
13591 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13592
13593 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13594 {
13595         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13596                 return TG3_RX_RET_MAX_SIZE_5717;
13597         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13598                 return TG3_RX_RET_MAX_SIZE_5700;
13599         else
13600                 return TG3_RX_RET_MAX_SIZE_5705;
13601 }
13602
/* Host bridges matched against this table elsewhere in the driver;
 * the name suggests these chipsets reorder posted PCI writes and need
 * a workaround — NOTE(review): usage site not visible in this chunk,
 * confirm against the WRITE_REORDER flag handling.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
13609
13610 static int __devinit tg3_get_invariants(struct tg3 *tp)
13611 {
13612         u32 misc_ctrl_reg;
13613         u32 pci_state_reg, grc_misc_cfg;
13614         u32 val;
13615         u16 pci_cmd;
13616         int err;
13617
13618         /* Force memory write invalidate off.  If we leave it on,
13619          * then on 5700_BX chips we have to enable a workaround.
13620          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13621          * to match the cacheline size.  The Broadcom driver have this
13622          * workaround but turns MWI off all the times so never uses
13623          * it.  This seems to suggest that the workaround is insufficient.
13624          */
13625         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13626         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13627         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13628
13629         /* Important! -- Make sure register accesses are byteswapped
13630          * correctly.  Also, for those chips that require it, make
13631          * sure that indirect register accesses are enabled before
13632          * the first operation.
13633          */
13634         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13635                               &misc_ctrl_reg);
13636         tp->misc_host_ctrl |= (misc_ctrl_reg &
13637                                MISC_HOST_CTRL_CHIPREV);
13638         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13639                                tp->misc_host_ctrl);
13640
13641         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13642                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13643         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13644                 u32 prod_id_asic_rev;
13645
13646                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13647                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13648                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13649                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13650                         pci_read_config_dword(tp->pdev,
13651                                               TG3PCI_GEN2_PRODID_ASICREV,
13652                                               &prod_id_asic_rev);
13653                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13654                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13655                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13656                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13657                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13658                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13659                         pci_read_config_dword(tp->pdev,
13660                                               TG3PCI_GEN15_PRODID_ASICREV,
13661                                               &prod_id_asic_rev);
13662                 else
13663                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13664                                               &prod_id_asic_rev);
13665
13666                 tp->pci_chip_rev_id = prod_id_asic_rev;
13667         }
13668
13669         /* Wrong chip ID in 5752 A0. This code can be removed later
13670          * as A0 is not in production.
13671          */
13672         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13673                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13674
13675         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13676          * we need to disable memory and use config. cycles
13677          * only to access all registers. The 5702/03 chips
13678          * can mistakenly decode the special cycles from the
13679          * ICH chipsets as memory write cycles, causing corruption
13680          * of register and memory space. Only certain ICH bridges
13681          * will drive special cycles with non-zero data during the
13682          * address phase which can fall within the 5703's address
13683          * range. This is not an ICH bug as the PCI spec allows
13684          * non-zero address during special cycles. However, only
13685          * these ICH bridges are known to drive non-zero addresses
13686          * during special cycles.
13687          *
13688          * Since special cycles do not cross PCI bridges, we only
13689          * enable this workaround if the 5703 is on the secondary
13690          * bus of these ICH bridges.
13691          */
13692         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13693             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13694                 static struct tg3_dev_id {
13695                         u32     vendor;
13696                         u32     device;
13697                         u32     rev;
13698                 } ich_chipsets[] = {
13699                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13700                           PCI_ANY_ID },
13701                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13702                           PCI_ANY_ID },
13703                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13704                           0xa },
13705                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13706                           PCI_ANY_ID },
13707                         { },
13708                 };
13709                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13710                 struct pci_dev *bridge = NULL;
13711
13712                 while (pci_id->vendor != 0) {
13713                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13714                                                 bridge);
13715                         if (!bridge) {
13716                                 pci_id++;
13717                                 continue;
13718                         }
13719                         if (pci_id->rev != PCI_ANY_ID) {
13720                                 if (bridge->revision > pci_id->rev)
13721                                         continue;
13722                         }
13723                         if (bridge->subordinate &&
13724                             (bridge->subordinate->number ==
13725                              tp->pdev->bus->number)) {
13726                                 tg3_flag_set(tp, ICH_WORKAROUND);
13727                                 pci_dev_put(bridge);
13728                                 break;
13729                         }
13730                 }
13731         }
13732
13733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13734                 static struct tg3_dev_id {
13735                         u32     vendor;
13736                         u32     device;
13737                 } bridge_chipsets[] = {
13738                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13739                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13740                         { },
13741                 };
13742                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13743                 struct pci_dev *bridge = NULL;
13744
13745                 while (pci_id->vendor != 0) {
13746                         bridge = pci_get_device(pci_id->vendor,
13747                                                 pci_id->device,
13748                                                 bridge);
13749                         if (!bridge) {
13750                                 pci_id++;
13751                                 continue;
13752                         }
13753                         if (bridge->subordinate &&
13754                             (bridge->subordinate->number <=
13755                              tp->pdev->bus->number) &&
13756                             (bridge->subordinate->subordinate >=
13757                              tp->pdev->bus->number)) {
13758                                 tg3_flag_set(tp, 5701_DMA_BUG);
13759                                 pci_dev_put(bridge);
13760                                 break;
13761                         }
13762                 }
13763         }
13764
13765         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13766          * DMA addresses > 40-bit. This bridge may have other additional
13767          * 57xx devices behind it in some 4-port NIC designs for example.
13768          * Any tg3 device found behind the bridge will also need the 40-bit
13769          * DMA workaround.
13770          */
13771         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13772             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13773                 tg3_flag_set(tp, 5780_CLASS);
13774                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13775                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13776         } else {
13777                 struct pci_dev *bridge = NULL;
13778
13779                 do {
13780                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13781                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13782                                                 bridge);
13783                         if (bridge && bridge->subordinate &&
13784                             (bridge->subordinate->number <=
13785                              tp->pdev->bus->number) &&
13786                             (bridge->subordinate->subordinate >=
13787                              tp->pdev->bus->number)) {
13788                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13789                                 pci_dev_put(bridge);
13790                                 break;
13791                         }
13792                 } while (bridge);
13793         }
13794
13795         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13796             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13797                 tp->pdev_peer = tg3_find_peer(tp);
13798
13799         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13800             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13801             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13802                 tg3_flag_set(tp, 5717_PLUS);
13803
13804         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13805             tg3_flag(tp, 5717_PLUS))
13806                 tg3_flag_set(tp, 57765_PLUS);
13807
13808         /* Intentionally exclude ASIC_REV_5906 */
13809         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13810             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13811             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13812             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13813             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13814             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13815             tg3_flag(tp, 57765_PLUS))
13816                 tg3_flag_set(tp, 5755_PLUS);
13817
13818         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13819             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13820             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13821             tg3_flag(tp, 5755_PLUS) ||
13822             tg3_flag(tp, 5780_CLASS))
13823                 tg3_flag_set(tp, 5750_PLUS);
13824
13825         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13826             tg3_flag(tp, 5750_PLUS))
13827                 tg3_flag_set(tp, 5705_PLUS);
13828
13829         /* Determine TSO capabilities */
13830         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13831                 ; /* Do nothing. HW bug. */
13832         else if (tg3_flag(tp, 57765_PLUS))
13833                 tg3_flag_set(tp, HW_TSO_3);
13834         else if (tg3_flag(tp, 5755_PLUS) ||
13835                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13836                 tg3_flag_set(tp, HW_TSO_2);
13837         else if (tg3_flag(tp, 5750_PLUS)) {
13838                 tg3_flag_set(tp, HW_TSO_1);
13839                 tg3_flag_set(tp, TSO_BUG);
13840                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13841                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13842                         tg3_flag_clear(tp, TSO_BUG);
13843         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13844                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13845                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13846                         tg3_flag_set(tp, TSO_BUG);
13847                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13848                         tp->fw_needed = FIRMWARE_TG3TSO5;
13849                 else
13850                         tp->fw_needed = FIRMWARE_TG3TSO;
13851         }
13852
13853         /* Selectively allow TSO based on operating conditions */
13854         if (tg3_flag(tp, HW_TSO_1) ||
13855             tg3_flag(tp, HW_TSO_2) ||
13856             tg3_flag(tp, HW_TSO_3) ||
13857             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13858                 tg3_flag_set(tp, TSO_CAPABLE);
13859         else {
13860                 tg3_flag_clear(tp, TSO_CAPABLE);
13861                 tg3_flag_clear(tp, TSO_BUG);
13862                 tp->fw_needed = NULL;
13863         }
13864
13865         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13866                 tp->fw_needed = FIRMWARE_TG3;
13867
13868         tp->irq_max = 1;
13869
13870         if (tg3_flag(tp, 5750_PLUS)) {
13871                 tg3_flag_set(tp, SUPPORT_MSI);
13872                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13873                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13874                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13875                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13876                      tp->pdev_peer == tp->pdev))
13877                         tg3_flag_clear(tp, SUPPORT_MSI);
13878
13879                 if (tg3_flag(tp, 5755_PLUS) ||
13880                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13881                         tg3_flag_set(tp, 1SHOT_MSI);
13882                 }
13883
13884                 if (tg3_flag(tp, 57765_PLUS)) {
13885                         tg3_flag_set(tp, SUPPORT_MSIX);
13886                         tp->irq_max = TG3_IRQ_MAX_VECS;
13887                 }
13888         }
13889
13890         if (tg3_flag(tp, 5755_PLUS))
13891                 tg3_flag_set(tp, SHORT_DMA_BUG);
13892
13893         if (tg3_flag(tp, 5717_PLUS))
13894                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13895
13896         if (tg3_flag(tp, 57765_PLUS) &&
13897             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13898                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13899
13900         if (!tg3_flag(tp, 5705_PLUS) ||
13901             tg3_flag(tp, 5780_CLASS) ||
13902             tg3_flag(tp, USE_JUMBO_BDFLAG))
13903                 tg3_flag_set(tp, JUMBO_CAPABLE);
13904
13905         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13906                               &pci_state_reg);
13907
13908         if (pci_is_pcie(tp->pdev)) {
13909                 u16 lnkctl;
13910
13911                 tg3_flag_set(tp, PCI_EXPRESS);
13912
13913                 tp->pcie_readrq = 4096;
13914                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13915                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13916                         tp->pcie_readrq = 2048;
13917
13918                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13919
13920                 pci_read_config_word(tp->pdev,
13921                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13922                                      &lnkctl);
13923                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13924                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13925                             ASIC_REV_5906) {
13926                                 tg3_flag_clear(tp, HW_TSO_2);
13927                                 tg3_flag_clear(tp, TSO_CAPABLE);
13928                         }
13929                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13930                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13931                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13932                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13933                                 tg3_flag_set(tp, CLKREQ_BUG);
13934                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13935                         tg3_flag_set(tp, L1PLLPD_EN);
13936                 }
13937         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13938                 /* BCM5785 devices are effectively PCIe devices, and should
13939                  * follow PCIe codepaths, but do not have a PCIe capabilities
13940                  * section.
13941                 */
13942                 tg3_flag_set(tp, PCI_EXPRESS);
13943         } else if (!tg3_flag(tp, 5705_PLUS) ||
13944                    tg3_flag(tp, 5780_CLASS)) {
13945                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13946                 if (!tp->pcix_cap) {
13947                         dev_err(&tp->pdev->dev,
13948                                 "Cannot find PCI-X capability, aborting\n");
13949                         return -EIO;
13950                 }
13951
13952                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13953                         tg3_flag_set(tp, PCIX_MODE);
13954         }
13955
13956         /* If we have an AMD 762 or VIA K8T800 chipset, write
13957          * reordering to the mailbox registers done by the host
13958          * controller can cause major troubles.  We read back from
13959          * every mailbox register write to force the writes to be
13960          * posted to the chip in order.
13961          */
13962         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13963             !tg3_flag(tp, PCI_EXPRESS))
13964                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13965
13966         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13967                              &tp->pci_cacheline_sz);
13968         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13969                              &tp->pci_lat_timer);
13970         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13971             tp->pci_lat_timer < 64) {
13972                 tp->pci_lat_timer = 64;
13973                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13974                                       tp->pci_lat_timer);
13975         }
13976
13977         /* Important! -- It is critical that the PCI-X hw workaround
13978          * situation is decided before the first MMIO register access.
13979          */
13980         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13981                 /* 5700 BX chips need to have their TX producer index
13982                  * mailboxes written twice to workaround a bug.
13983                  */
13984                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13985
13986                 /* If we are in PCI-X mode, enable register write workaround.
13987                  *
13988                  * The workaround is to use indirect register accesses
13989                  * for all chip writes not to mailbox registers.
13990                  */
13991                 if (tg3_flag(tp, PCIX_MODE)) {
13992                         u32 pm_reg;
13993
13994                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13995
13996                         /* The chip can have it's power management PCI config
13997                          * space registers clobbered due to this bug.
13998                          * So explicitly force the chip into D0 here.
13999                          */
14000                         pci_read_config_dword(tp->pdev,
14001                                               tp->pm_cap + PCI_PM_CTRL,
14002                                               &pm_reg);
14003                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14004                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14005                         pci_write_config_dword(tp->pdev,
14006                                                tp->pm_cap + PCI_PM_CTRL,
14007                                                pm_reg);
14008
14009                         /* Also, force SERR#/PERR# in PCI command. */
14010                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14011                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14012                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14013                 }
14014         }
14015
14016         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14017                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14018         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14019                 tg3_flag_set(tp, PCI_32BIT);
14020
14021         /* Chip-specific fixup from Broadcom driver */
14022         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14023             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14024                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14025                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14026         }
14027
14028         /* Default fast path register access methods */
14029         tp->read32 = tg3_read32;
14030         tp->write32 = tg3_write32;
14031         tp->read32_mbox = tg3_read32;
14032         tp->write32_mbox = tg3_write32;
14033         tp->write32_tx_mbox = tg3_write32;
14034         tp->write32_rx_mbox = tg3_write32;
14035
14036         /* Various workaround register access methods */
14037         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14038                 tp->write32 = tg3_write_indirect_reg32;
14039         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14040                  (tg3_flag(tp, PCI_EXPRESS) &&
14041                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14042                 /*
14043                  * Back to back register writes can cause problems on these
14044                  * chips, the workaround is to read back all reg writes
14045                  * except those to mailbox regs.
14046                  *
14047                  * See tg3_write_indirect_reg32().
14048                  */
14049                 tp->write32 = tg3_write_flush_reg32;
14050         }
14051
14052         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14053                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14054                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14055                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14056         }
14057
14058         if (tg3_flag(tp, ICH_WORKAROUND)) {
14059                 tp->read32 = tg3_read_indirect_reg32;
14060                 tp->write32 = tg3_write_indirect_reg32;
14061                 tp->read32_mbox = tg3_read_indirect_mbox;
14062                 tp->write32_mbox = tg3_write_indirect_mbox;
14063                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14064                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14065
14066                 iounmap(tp->regs);
14067                 tp->regs = NULL;
14068
14069                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14070                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14071                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14072         }
14073         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14074                 tp->read32_mbox = tg3_read32_mbox_5906;
14075                 tp->write32_mbox = tg3_write32_mbox_5906;
14076                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14077                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14078         }
14079
14080         if (tp->write32 == tg3_write_indirect_reg32 ||
14081             (tg3_flag(tp, PCIX_MODE) &&
14082              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14083               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14084                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14085
14086         /* The memory arbiter has to be enabled in order for SRAM accesses
14087          * to succeed.  Normally on powerup the tg3 chip firmware will make
14088          * sure it is enabled, but other entities such as system netboot
14089          * code might disable it.
14090          */
14091         val = tr32(MEMARB_MODE);
14092         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14093
14094         if (tg3_flag(tp, PCIX_MODE)) {
14095                 pci_read_config_dword(tp->pdev,
14096                                       tp->pcix_cap + PCI_X_STATUS, &val);
14097                 tp->pci_fn = val & 0x7;
14098         } else {
14099                 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14100         }
14101
14102         /* Get eeprom hw config before calling tg3_set_power_state().
14103          * In particular, the TG3_FLAG_IS_NIC flag must be
14104          * determined before calling tg3_set_power_state() so that
14105          * we know whether or not to switch out of Vaux power.
14106          * When the flag is set, it means that GPIO1 is used for eeprom
14107          * write protect and also implies that it is a LOM where GPIOs
14108          * are not used to switch power.
14109          */
14110         tg3_get_eeprom_hw_cfg(tp);
14111
14112         if (tg3_flag(tp, ENABLE_APE)) {
14113                 /* Allow reads and writes to the
14114                  * APE register and memory space.
14115                  */
14116                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14117                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14118                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14119                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14120                                        pci_state_reg);
14121
14122                 tg3_ape_lock_init(tp);
14123         }
14124
14125         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14126             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14127             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14128             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14129             tg3_flag(tp, 57765_PLUS))
14130                 tg3_flag_set(tp, CPMU_PRESENT);
14131
14132         /* Set up tp->grc_local_ctrl before calling
14133          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14134          * will bring 5700's external PHY out of reset.
14135          * It is also used as eeprom write protect on LOMs.
14136          */
14137         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14138         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14139             tg3_flag(tp, EEPROM_WRITE_PROT))
14140                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14141                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14142         /* Unused GPIO3 must be driven as output on 5752 because there
14143          * are no pull-up resistors on unused GPIO pins.
14144          */
14145         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14146                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14147
14148         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14149             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14150             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14151                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14152
14153         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14154             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14155                 /* Turn off the debug UART. */
14156                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14157                 if (tg3_flag(tp, IS_NIC))
14158                         /* Keep VMain power. */
14159                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14160                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14161         }
14162
14163         /* Switch out of Vaux if it is a NIC */
14164         tg3_pwrsrc_switch_to_vmain(tp);
14165
14166         /* Derive initial jumbo mode from MTU assigned in
14167          * ether_setup() via the alloc_etherdev() call
14168          */
14169         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14170                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14171
14172         /* Determine WakeOnLan speed to use. */
14173         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14174             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14175             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14176             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14177                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14178         } else {
14179                 tg3_flag_set(tp, WOL_SPEED_100MB);
14180         }
14181
14182         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14183                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14184
14185         /* A few boards don't want Ethernet@WireSpeed phy feature */
14186         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14187             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14188              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14189              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14190             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14191             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14192                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14193
14194         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14195             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14196                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14197         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14198                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14199
14200         if (tg3_flag(tp, 5705_PLUS) &&
14201             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14202             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14203             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14204             !tg3_flag(tp, 57765_PLUS)) {
14205                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14206                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14207                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14208                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14209                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14210                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14211                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14212                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14213                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14214                 } else
14215                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14216         }
14217
14218         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14219             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14220                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14221                 if (tp->phy_otp == 0)
14222                         tp->phy_otp = TG3_OTP_DEFAULT;
14223         }
14224
14225         if (tg3_flag(tp, CPMU_PRESENT))
14226                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14227         else
14228                 tp->mi_mode = MAC_MI_MODE_BASE;
14229
14230         tp->coalesce_mode = 0;
14231         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14232             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14233                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14234
14235         /* Set these bits to enable statistics workaround. */
14236         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14237             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14238             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14239                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14240                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14241         }
14242
14243         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14244             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14245                 tg3_flag_set(tp, USE_PHYLIB);
14246
14247         err = tg3_mdio_init(tp);
14248         if (err)
14249                 return err;
14250
14251         /* Initialize data/descriptor byte/word swapping. */
14252         val = tr32(GRC_MODE);
14253         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14254                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14255                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14256                         GRC_MODE_B2HRX_ENABLE |
14257                         GRC_MODE_HTX2B_ENABLE |
14258                         GRC_MODE_HOST_STACKUP);
14259         else
14260                 val &= GRC_MODE_HOST_STACKUP;
14261
14262         tw32(GRC_MODE, val | tp->grc_mode);
14263
14264         tg3_switch_clocks(tp);
14265
14266         /* Clear this out for sanity. */
14267         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14268
14269         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14270                               &pci_state_reg);
14271         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14272             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14273                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14274
14275                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14276                     chiprevid == CHIPREV_ID_5701_B0 ||
14277                     chiprevid == CHIPREV_ID_5701_B2 ||
14278                     chiprevid == CHIPREV_ID_5701_B5) {
14279                         void __iomem *sram_base;
14280
14281                         /* Write some dummy words into the SRAM status block
14282                          * area, see if it reads back correctly.  If the return
14283                          * value is bad, force enable the PCIX workaround.
14284                          */
14285                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14286
14287                         writel(0x00000000, sram_base);
14288                         writel(0x00000000, sram_base + 4);
14289                         writel(0xffffffff, sram_base + 4);
14290                         if (readl(sram_base) != 0x00000000)
14291                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14292                 }
14293         }
14294
14295         udelay(50);
14296         tg3_nvram_init(tp);
14297
14298         grc_misc_cfg = tr32(GRC_MISC_CFG);
14299         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14300
14301         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14302             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14303              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14304                 tg3_flag_set(tp, IS_5788);
14305
14306         if (!tg3_flag(tp, IS_5788) &&
14307             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14308                 tg3_flag_set(tp, TAGGED_STATUS);
14309         if (tg3_flag(tp, TAGGED_STATUS)) {
14310                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14311                                       HOSTCC_MODE_CLRTICK_TXBD);
14312
14313                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14314                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14315                                        tp->misc_host_ctrl);
14316         }
14317
14318         /* Preserve the APE MAC_MODE bits */
14319         if (tg3_flag(tp, ENABLE_APE))
14320                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14321         else
14322                 tp->mac_mode = TG3_DEF_MAC_MODE;
14323
14324         /* these are limited to 10/100 only */
14325         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14326              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14327             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14328              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14329              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14330               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14331               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14332             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14333              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14334               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14335               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14336             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14337             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14338             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14339             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14340                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14341
14342         err = tg3_phy_probe(tp);
14343         if (err) {
14344                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14345                 /* ... but do not return immediately ... */
14346                 tg3_mdio_fini(tp);
14347         }
14348
14349         tg3_read_vpd(tp);
14350         tg3_read_fw_ver(tp);
14351
14352         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14353                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14354         } else {
14355                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14356                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14357                 else
14358                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14359         }
14360
14361         /* 5700 {AX,BX} chips have a broken status block link
14362          * change bit implementation, so we must use the
14363          * status register in those cases.
14364          */
14365         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14366                 tg3_flag_set(tp, USE_LINKCHG_REG);
14367         else
14368                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14369
14370         /* The led_ctrl is set during tg3_phy_probe, here we might
14371          * have to force the link status polling mechanism based
14372          * upon subsystem IDs.
14373          */
14374         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14375             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14376             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14377                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14378                 tg3_flag_set(tp, USE_LINKCHG_REG);
14379         }
14380
14381         /* For all SERDES we poll the MAC status register. */
14382         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14383                 tg3_flag_set(tp, POLL_SERDES);
14384         else
14385                 tg3_flag_clear(tp, POLL_SERDES);
14386
14387         tp->rx_offset = NET_IP_ALIGN;
14388         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14389         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14390             tg3_flag(tp, PCIX_MODE)) {
14391                 tp->rx_offset = 0;
14392 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14393                 tp->rx_copy_thresh = ~(u16)0;
14394 #endif
14395         }
14396
14397         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14398         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14399         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14400
14401         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14402
14403         /* Increment the rx prod index on the rx std ring by at most
14404          * 8 for these chips to workaround hw errata.
14405          */
14406         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14407             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14408             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14409                 tp->rx_std_max_post = 8;
14410
14411         if (tg3_flag(tp, ASPM_WORKAROUND))
14412                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14413                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14414
14415         return err;
14416 }
14417
14418 #ifdef CONFIG_SPARC
14419 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14420 {
14421         struct net_device *dev = tp->dev;
14422         struct pci_dev *pdev = tp->pdev;
14423         struct device_node *dp = pci_device_to_OF_node(pdev);
14424         const unsigned char *addr;
14425         int len;
14426
14427         addr = of_get_property(dp, "local-mac-address", &len);
14428         if (addr && len == 6) {
14429                 memcpy(dev->dev_addr, addr, 6);
14430                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14431                 return 0;
14432         }
14433         return -ENODEV;
14434 }
14435
14436 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14437 {
14438         struct net_device *dev = tp->dev;
14439
14440         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14441         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14442         return 0;
14443 }
14444 #endif
14445
/* Determine the device's MAC address, trying sources in decreasing order
 * of preference: OpenFirmware (SPARC only), the SRAM MAC address mailbox,
 * NVRAM at a chip-specific offset, and finally the MAC_ADDR_0 registers.
 * Copies the result into dev->dev_addr and dev->perm_addr.
 * Returns 0 on success, -EINVAL if no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Select the NVRAM offset of the MAC address for this chip/function. */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts: the second MAC's address lives at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): lock-then-reset vs unlock here looks like an
		 * NVRAM arbitration quirk for these chips -- confirm against
		 * tg3_nvram_lock() semantics before touching.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* Multi-function parts: per-function address slots. */
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* "HK" signature => mailbox is valid */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* Address is the low 2 bytes of hi + all 4 of lo. */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* Last resort on SPARC: system IDPROM address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
14521
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

/* Compute DMA read/write boundary bits for the DMA_RWCTRL register.
 *
 * @tp:  device context; chip revision and bus-type flags are consulted.
 * @val: current DMA_RWCTRL value to be augmented.
 *
 * The boundary bits keep DMA bursts from crossing cache-line boundaries,
 * which many RISC PCI controllers handle poorly (see the long comment
 * below).  Only 5700/5701 and PCI Express parts act on these bits; for
 * all other chips @val is returned unchanged.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of
	 * zero (not configured) is treated as a 1KB cache line here.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Pick a boundary policy appropriate for the host architecture. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	/* On 57765+ the only knob is the cache-alignment disable bit:
	 * keep alignment when a boundary goal exists, otherwise disable it.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe: only the write-side boundary is adjustable. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Legacy PCI: match the boundary to the cache-line size
		 * for a single-cacheline goal, otherwise fall through to
		 * the next larger supported boundary.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
14665
/* Run a single DMA transfer between the host buffer @buf (@buf_dma) and
 * NIC SRAM using the chip's internal DMA descriptor mechanism.
 *
 * @to_device: non-zero for a host->NIC (read DMA) transfer, zero for
 *             NIC->host (write DMA).
 *
 * A buffer descriptor is written into NIC SRAM through the PCI
 * config-space memory window and queued on the appropriate FTQ;
 * completion is then polled for up to 40 * 100us.
 *
 * Returns 0 when the transfer completes, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA engines and completion FIFOs before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;	/* NIC-side SRAM buffer address */
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* cqid/sqid pair for the read-DMA engine; magic values per
		 * Broadcom -- presumably completion queue 13, send queue 2.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* cqid/sqid pair for the write-DMA engine. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO until our descriptor shows up. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
14745
/* Size of the coherent DMA buffer used by tg3_test_dma(). */
#define TEST_BUFFER_SIZE	0x2000

/* Host bridges known to need the 16-byte DMA write boundary workaround
 * even though they pass the DMA loop-back test (see tg3_test_dma()).
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
14752
/* Determine and program a safe DMA_RWCTRL value for this chip/bus combo.
 *
 * Starts from the boundary bits produced by tg3_calc_dma_bndry(), ORs in
 * chip-specific watermark/command settings, and on 5700/5701 runs an
 * actual write-then-read DMA loop-back test against a coherent buffer to
 * detect the write-DMA bug, dropping the write boundary to 16 bytes if
 * corruption is observed.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command codes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* NOTE(review): 5703/5704 clear the low four bits set above --
	 * presumably those bits are reserved on these chips; confirm
	 * against the register spec.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the loop-back test below. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop runs at most twice: once with the unrestricted boundary
	 * and, if corruption is detected, once more after dropping to
	 * a 16-byte write boundary.
	 */
	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* First corruption: retry once with the 16-byte
			 * write boundary before declaring failure.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
14942
14943 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14944 {
14945         if (tg3_flag(tp, 57765_PLUS)) {
14946                 tp->bufmgr_config.mbuf_read_dma_low_water =
14947                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14948                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14949                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14950                 tp->bufmgr_config.mbuf_high_water =
14951                         DEFAULT_MB_HIGH_WATER_57765;
14952
14953                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14954                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14955                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14956                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14957                 tp->bufmgr_config.mbuf_high_water_jumbo =
14958                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14959         } else if (tg3_flag(tp, 5705_PLUS)) {
14960                 tp->bufmgr_config.mbuf_read_dma_low_water =
14961                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14962                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14963                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14964                 tp->bufmgr_config.mbuf_high_water =
14965                         DEFAULT_MB_HIGH_WATER_5705;
14966                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14967                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14968                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14969                         tp->bufmgr_config.mbuf_high_water =
14970                                 DEFAULT_MB_HIGH_WATER_5906;
14971                 }
14972
14973                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14974                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14975                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14976                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14977                 tp->bufmgr_config.mbuf_high_water_jumbo =
14978                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14979         } else {
14980                 tp->bufmgr_config.mbuf_read_dma_low_water =
14981                         DEFAULT_MB_RDMA_LOW_WATER;
14982                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14983                         DEFAULT_MB_MACRX_LOW_WATER;
14984                 tp->bufmgr_config.mbuf_high_water =
14985                         DEFAULT_MB_HIGH_WATER;
14986
14987                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14988                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14989                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14990                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14991                 tp->bufmgr_config.mbuf_high_water_jumbo =
14992                         DEFAULT_MB_HIGH_WATER_JUMBO;
14993         }
14994
14995         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14996         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14997 }
14998
14999 static char * __devinit tg3_phy_string(struct tg3 *tp)
15000 {
15001         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15002         case TG3_PHY_ID_BCM5400:        return "5400";
15003         case TG3_PHY_ID_BCM5401:        return "5401";
15004         case TG3_PHY_ID_BCM5411:        return "5411";
15005         case TG3_PHY_ID_BCM5701:        return "5701";
15006         case TG3_PHY_ID_BCM5703:        return "5703";
15007         case TG3_PHY_ID_BCM5704:        return "5704";
15008         case TG3_PHY_ID_BCM5705:        return "5705";
15009         case TG3_PHY_ID_BCM5750:        return "5750";
15010         case TG3_PHY_ID_BCM5752:        return "5752";
15011         case TG3_PHY_ID_BCM5714:        return "5714";
15012         case TG3_PHY_ID_BCM5780:        return "5780";
15013         case TG3_PHY_ID_BCM5755:        return "5755";
15014         case TG3_PHY_ID_BCM5787:        return "5787";
15015         case TG3_PHY_ID_BCM5784:        return "5784";
15016         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15017         case TG3_PHY_ID_BCM5906:        return "5906";
15018         case TG3_PHY_ID_BCM5761:        return "5761";
15019         case TG3_PHY_ID_BCM5718C:       return "5718C";
15020         case TG3_PHY_ID_BCM5718S:       return "5718S";
15021         case TG3_PHY_ID_BCM57765:       return "57765";
15022         case TG3_PHY_ID_BCM5719C:       return "5719C";
15023         case TG3_PHY_ID_BCM5720C:       return "5720C";
15024         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15025         case 0:                 return "serdes";
15026         default:                return "unknown";
15027         }
15028 }
15029
15030 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15031 {
15032         if (tg3_flag(tp, PCI_EXPRESS)) {
15033                 strcpy(str, "PCI Express");
15034                 return str;
15035         } else if (tg3_flag(tp, PCIX_MODE)) {
15036                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15037
15038                 strcpy(str, "PCIX:");
15039
15040                 if ((clock_ctrl == 7) ||
15041                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15042                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15043                         strcat(str, "133MHz");
15044                 else if (clock_ctrl == 0)
15045                         strcat(str, "33MHz");
15046                 else if (clock_ctrl == 2)
15047                         strcat(str, "50MHz");
15048                 else if (clock_ctrl == 4)
15049                         strcat(str, "66MHz");
15050                 else if (clock_ctrl == 6)
15051                         strcat(str, "100MHz");
15052         } else {
15053                 strcpy(str, "PCI:");
15054                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15055                         strcat(str, "66MHz");
15056                 else
15057                         strcat(str, "33MHz");
15058         }
15059         if (tg3_flag(tp, PCI_32BIT))
15060                 strcat(str, ":32-bit");
15061         else
15062                 strcat(str, ":64-bit");
15063         return str;
15064 }
15065
/* Find the other PCI function of a dual-port NIC (e.g. 5704).
 *
 * Scans all eight functions in tp->pdev's slot for a device other than
 * tp->pdev itself; returns tp->pdev when none is found (single-port
 * configuration).  No reference is held on the returned device -- see
 * the comment near the end.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* NOTE(review): if the loop finishes without breaking, peer holds
	 * the already-released result of the last pci_get_slot() call; in
	 * practice that is NULL or tp->pdev itself -- confirm that the
	 * extra pci_dev_put() below is harmless in the tp->pdev case.
	 */
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15093
15094 static void __devinit tg3_init_coal(struct tg3 *tp)
15095 {
15096         struct ethtool_coalesce *ec = &tp->coal;
15097
15098         memset(ec, 0, sizeof(*ec));
15099         ec->cmd = ETHTOOL_GCOALESCE;
15100         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15101         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15102         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15103         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15104         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15105         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15106         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15107         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15108         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15109
15110         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15111                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15112                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15113                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15114                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15115                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15116         }
15117
15118         if (tg3_flag(tp, 5705_PLUS)) {
15119                 ec->rx_coalesce_usecs_irq = 0;
15120                 ec->tx_coalesce_usecs_irq = 0;
15121                 ec->stats_block_coalesce_usecs = 0;
15122         }
15123 }
15124
/* net_device callbacks for tg3; installed on the netdev in tg3_init_one(). */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
15142
15143 static int __devinit tg3_init_one(struct pci_dev *pdev,
15144                                   const struct pci_device_id *ent)
15145 {
15146         struct net_device *dev;
15147         struct tg3 *tp;
15148         int i, err, pm_cap;
15149         u32 sndmbx, rcvmbx, intmbx;
15150         char str[40];
15151         u64 dma_mask, persist_dma_mask;
15152         u32 features = 0;
15153
15154         printk_once(KERN_INFO "%s\n", version);
15155
15156         err = pci_enable_device(pdev);
15157         if (err) {
15158                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15159                 return err;
15160         }
15161
15162         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15163         if (err) {
15164                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15165                 goto err_out_disable_pdev;
15166         }
15167
15168         pci_set_master(pdev);
15169
15170         /* Find power-management capability. */
15171         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15172         if (pm_cap == 0) {
15173                 dev_err(&pdev->dev,
15174                         "Cannot find Power Management capability, aborting\n");
15175                 err = -EIO;
15176                 goto err_out_free_res;
15177         }
15178
15179         err = pci_set_power_state(pdev, PCI_D0);
15180         if (err) {
15181                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15182                 goto err_out_free_res;
15183         }
15184
15185         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15186         if (!dev) {
15187                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15188                 err = -ENOMEM;
15189                 goto err_out_power_down;
15190         }
15191
15192         SET_NETDEV_DEV(dev, &pdev->dev);
15193
15194         tp = netdev_priv(dev);
15195         tp->pdev = pdev;
15196         tp->dev = dev;
15197         tp->pm_cap = pm_cap;
15198         tp->rx_mode = TG3_DEF_RX_MODE;
15199         tp->tx_mode = TG3_DEF_TX_MODE;
15200
15201         if (tg3_debug > 0)
15202                 tp->msg_enable = tg3_debug;
15203         else
15204                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15205
15206         /* The word/byte swap controls here control register access byte
15207          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15208          * setting below.
15209          */
15210         tp->misc_host_ctrl =
15211                 MISC_HOST_CTRL_MASK_PCI_INT |
15212                 MISC_HOST_CTRL_WORD_SWAP |
15213                 MISC_HOST_CTRL_INDIR_ACCESS |
15214                 MISC_HOST_CTRL_PCISTATE_RW;
15215
15216         /* The NONFRM (non-frame) byte/word swap controls take effect
15217          * on descriptor entries, anything which isn't packet data.
15218          *
15219          * The StrongARM chips on the board (one for tx, one for rx)
15220          * are running in big-endian mode.
15221          */
15222         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15223                         GRC_MODE_WSWAP_NONFRM_DATA);
15224 #ifdef __BIG_ENDIAN
15225         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15226 #endif
15227         spin_lock_init(&tp->lock);
15228         spin_lock_init(&tp->indirect_lock);
15229         INIT_WORK(&tp->reset_task, tg3_reset_task);
15230
15231         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15232         if (!tp->regs) {
15233                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15234                 err = -ENOMEM;
15235                 goto err_out_free_dev;
15236         }
15237
15238         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15239             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15240             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15241             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15242             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15243             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15244             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15245             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15246                 tg3_flag_set(tp, ENABLE_APE);
15247                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15248                 if (!tp->aperegs) {
15249                         dev_err(&pdev->dev,
15250                                 "Cannot map APE registers, aborting\n");
15251                         err = -ENOMEM;
15252                         goto err_out_iounmap;
15253                 }
15254         }
15255
15256         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15257         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15258
15259         dev->ethtool_ops = &tg3_ethtool_ops;
15260         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15261         dev->netdev_ops = &tg3_netdev_ops;
15262         dev->irq = pdev->irq;
15263
15264         err = tg3_get_invariants(tp);
15265         if (err) {
15266                 dev_err(&pdev->dev,
15267                         "Problem fetching invariants of chip, aborting\n");
15268                 goto err_out_apeunmap;
15269         }
15270
15271         /* The EPB bridge inside 5714, 5715, and 5780 and any
15272          * device behind the EPB cannot support DMA addresses > 40-bit.
15273          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15274          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15275          * do DMA address check in tg3_start_xmit().
15276          */
15277         if (tg3_flag(tp, IS_5788))
15278                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15279         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15280                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15281 #ifdef CONFIG_HIGHMEM
15282                 dma_mask = DMA_BIT_MASK(64);
15283 #endif
15284         } else
15285                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15286
15287         /* Configure DMA attributes. */
15288         if (dma_mask > DMA_BIT_MASK(32)) {
15289                 err = pci_set_dma_mask(pdev, dma_mask);
15290                 if (!err) {
15291                         features |= NETIF_F_HIGHDMA;
15292                         err = pci_set_consistent_dma_mask(pdev,
15293                                                           persist_dma_mask);
15294                         if (err < 0) {
15295                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15296                                         "DMA for consistent allocations\n");
15297                                 goto err_out_apeunmap;
15298                         }
15299                 }
15300         }
15301         if (err || dma_mask == DMA_BIT_MASK(32)) {
15302                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15303                 if (err) {
15304                         dev_err(&pdev->dev,
15305                                 "No usable DMA configuration, aborting\n");
15306                         goto err_out_apeunmap;
15307                 }
15308         }
15309
15310         tg3_init_bufmgr_config(tp);
15311
15312         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15313
15314         /* 5700 B0 chips do not support checksumming correctly due
15315          * to hardware bugs.
15316          */
15317         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15318                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15319
15320                 if (tg3_flag(tp, 5755_PLUS))
15321                         features |= NETIF_F_IPV6_CSUM;
15322         }
15323
15324         /* TSO is on by default on chips that support hardware TSO.
15325          * Firmware TSO on older chips gives lower performance, so it
15326          * is off by default, but can be enabled using ethtool.
15327          */
15328         if ((tg3_flag(tp, HW_TSO_1) ||
15329              tg3_flag(tp, HW_TSO_2) ||
15330              tg3_flag(tp, HW_TSO_3)) &&
15331             (features & NETIF_F_IP_CSUM))
15332                 features |= NETIF_F_TSO;
15333         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15334                 if (features & NETIF_F_IPV6_CSUM)
15335                         features |= NETIF_F_TSO6;
15336                 if (tg3_flag(tp, HW_TSO_3) ||
15337                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15338                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15339                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15340                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15341                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15342                         features |= NETIF_F_TSO_ECN;
15343         }
15344
15345         dev->features |= features;
15346         dev->vlan_features |= features;
15347
15348         /*
15349          * Add loopback capability only for a subset of devices that support
15350          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15351          * loopback for the remaining devices.
15352          */
15353         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15354             !tg3_flag(tp, CPMU_PRESENT))
15355                 /* Add the loopback capability */
15356                 features |= NETIF_F_LOOPBACK;
15357
15358         dev->hw_features |= features;
15359
15360         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15361             !tg3_flag(tp, TSO_CAPABLE) &&
15362             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15363                 tg3_flag_set(tp, MAX_RXPEND_64);
15364                 tp->rx_pending = 63;
15365         }
15366
15367         err = tg3_get_device_address(tp);
15368         if (err) {
15369                 dev_err(&pdev->dev,
15370                         "Could not obtain valid ethernet address, aborting\n");
15371                 goto err_out_apeunmap;
15372         }
15373
15374         /*
15375          * Reset chip in case UNDI or EFI driver did not shutdown
15376          * DMA self test will enable WDMAC and we'll see (spurious)
15377          * pending DMA on the PCI bus at that point.
15378          */
15379         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15380             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15381                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15382                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15383         }
15384
15385         err = tg3_test_dma(tp);
15386         if (err) {
15387                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15388                 goto err_out_apeunmap;
15389         }
15390
15391         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15392         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15393         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15394         for (i = 0; i < tp->irq_max; i++) {
15395                 struct tg3_napi *tnapi = &tp->napi[i];
15396
15397                 tnapi->tp = tp;
15398                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15399
15400                 tnapi->int_mbox = intmbx;
15401                 if (i < 4)
15402                         intmbx += 0x8;
15403                 else
15404                         intmbx += 0x4;
15405
15406                 tnapi->consmbox = rcvmbx;
15407                 tnapi->prodmbox = sndmbx;
15408
15409                 if (i)
15410                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15411                 else
15412                         tnapi->coal_now = HOSTCC_MODE_NOW;
15413
15414                 if (!tg3_flag(tp, SUPPORT_MSIX))
15415                         break;
15416
15417                 /*
15418                  * If we support MSIX, we'll be using RSS.  If we're using
15419                  * RSS, the first vector only handles link interrupts and the
15420                  * remaining vectors handle rx and tx interrupts.  Reuse the
15421                  * mailbox values for the next iteration.  The values we setup
15422                  * above are still useful for the single vectored mode.
15423                  */
15424                 if (!i)
15425                         continue;
15426
15427                 rcvmbx += 0x8;
15428
15429                 if (sndmbx & 0x4)
15430                         sndmbx -= 0x4;
15431                 else
15432                         sndmbx += 0xc;
15433         }
15434
15435         tg3_init_coal(tp);
15436
15437         pci_set_drvdata(pdev, dev);
15438
15439         if (tg3_flag(tp, 5717_PLUS)) {
15440                 /* Resume a low-power mode */
15441                 tg3_frob_aux_power(tp, false);
15442         }
15443
15444         err = register_netdev(dev);
15445         if (err) {
15446                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15447                 goto err_out_apeunmap;
15448         }
15449
15450         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15451                     tp->board_part_number,
15452                     tp->pci_chip_rev_id,
15453                     tg3_bus_string(tp, str),
15454                     dev->dev_addr);
15455
15456         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15457                 struct phy_device *phydev;
15458                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15459                 netdev_info(dev,
15460                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15461                             phydev->drv->name, dev_name(&phydev->dev));
15462         } else {
15463                 char *ethtype;
15464
15465                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15466                         ethtype = "10/100Base-TX";
15467                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15468                         ethtype = "1000Base-SX";
15469                 else
15470                         ethtype = "10/100/1000Base-T";
15471
15472                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15473                             "(WireSpeed[%d], EEE[%d])\n",
15474                             tg3_phy_string(tp), ethtype,
15475                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15476                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15477         }
15478
15479         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15480                     (dev->features & NETIF_F_RXCSUM) != 0,
15481                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15482                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15483                     tg3_flag(tp, ENABLE_ASF) != 0,
15484                     tg3_flag(tp, TSO_CAPABLE) != 0);
15485         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15486                     tp->dma_rwctrl,
15487                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15488                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15489
15490         pci_save_state(pdev);
15491
15492         return 0;
15493
15494 err_out_apeunmap:
15495         if (tp->aperegs) {
15496                 iounmap(tp->aperegs);
15497                 tp->aperegs = NULL;
15498         }
15499
15500 err_out_iounmap:
15501         if (tp->regs) {
15502                 iounmap(tp->regs);
15503                 tp->regs = NULL;
15504         }
15505
15506 err_out_free_dev:
15507         free_netdev(dev);
15508
15509 err_out_power_down:
15510         pci_set_power_state(pdev, PCI_D3hot);
15511
15512 err_out_free_res:
15513         pci_release_regions(pdev);
15514
15515 err_out_disable_pdev:
15516         pci_disable_device(pdev);
15517         pci_set_drvdata(pdev, NULL);
15518         return err;
15519 }
15520
15521 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15522 {
15523         struct net_device *dev = pci_get_drvdata(pdev);
15524
15525         if (dev) {
15526                 struct tg3 *tp = netdev_priv(dev);
15527
15528                 if (tp->fw)
15529                         release_firmware(tp->fw);
15530
15531                 cancel_work_sync(&tp->reset_task);
15532
15533                 if (!tg3_flag(tp, USE_PHYLIB)) {
15534                         tg3_phy_fini(tp);
15535                         tg3_mdio_fini(tp);
15536                 }
15537
15538                 unregister_netdev(dev);
15539                 if (tp->aperegs) {
15540                         iounmap(tp->aperegs);
15541                         tp->aperegs = NULL;
15542                 }
15543                 if (tp->regs) {
15544                         iounmap(tp->regs);
15545                         tp->regs = NULL;
15546                 }
15547                 free_netdev(dev);
15548                 pci_release_regions(pdev);
15549                 pci_disable_device(pdev);
15550                 pci_set_drvdata(pdev, NULL);
15551         }
15552 }
15553
15554 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend callback: quiesce the interface and prepare the
 * chip for low power.  If power-down preparation fails, the hardware is
 * restarted so the device remains usable.  Returns 0 on success or a
 * negative errno from tg3_power_down_prepare().
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Let any in-flight reset work finish before stopping the device. */
	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down failed: bring the hardware back up so the
		 * interface keeps working; err is still returned.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart must happen outside the full lock. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
15608
/* System-sleep resume callback: reprogram the hardware and restart the
 * interface.  No-op if the interface was down at suspend time.  Returns
 * 0 on success or a negative errno from tg3_restart_hw().
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
15641
/* Wire the suspend/resume callbacks into a dev_pm_ops structure.  When
 * CONFIG_PM_SLEEP is disabled no PM ops are registered (TG3_PM_OPS is
 * NULL in the pci_driver below).
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
15650
15651 /**
15652  * tg3_io_error_detected - called when PCI error is detected
15653  * @pdev: Pointer to PCI device
15654  * @state: The current pci connection state
15655  *
15656  * This function is called after a PCI bus error affecting
15657  * this device has been detected.
15658  */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* If the interface is down there is no hardware state to tear
	 * down; just report the recovery result.
	 */
	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	/* RESTART_TIMER is cleared a second time on purpose: the reset
	 * task may have re-set it before cancel_work_sync() completed
	 * (NOTE(review): presumed reason for the duplicate — confirm).
	 */
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	/* On permanent failure tell the core to disconnect; otherwise
	 * disable the device and request a slot reset.
	 */
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
15702
15703 /**
15704  * tg3_io_slot_reset - called after the pci bus has been reset.
15705  * @pdev: Pointer to PCI device
15706  *
15707  * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
15709  * followed by fixups by BIOS, and has its config space
15710  * set up identically to what it was at cold boot.
15711  */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	/* Re-save config space so a future error/reset cycle restores
	 * this freshly restored state.
	 */
	pci_save_state(pdev);

	/* Interface was down: nothing further to rebuild, recovery done. */
	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
15746
15747 /**
15748  * tg3_io_resume - called when traffic can start flowing again.
15749  * @pdev: Pointer to PCI device
15750  *
15751  * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
15753  */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	/* Nothing to restart if the interface is down. */
	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	/* PHY restart must happen outside the full lock. */
	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
15786
/* PCI error-recovery (AER) callbacks registered via tg3_driver. */
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
15792
/* PCI driver description: probe/remove entry points, supported device
 * IDs, error handlers, and (optional) power-management callbacks.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
15801
/* Module load: register the tg3 PCI driver with the PCI core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
15806
/* Module unload: unregister the tg3 PCI driver. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
15811
/* Module entry and exit points. */
module_init(tg3_init);
module_exit(tg3_cleanup);