Merge branch 'idle-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb...
[pandora-kernel.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if_vlan.h>
40 #include <linux/ip.h>
41 #include <linux/tcp.h>
42 #include <linux/workqueue.h>
43 #include <linux/prefetch.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/firmware.h>
46
47 #include <net/checksum.h>
48 #include <net/ip.h>
49
50 #include <asm/system.h>
51 #include <linux/io.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
54
55 #ifdef CONFIG_SPARC
56 #include <asm/idprom.h>
57 #include <asm/prom.h>
58 #endif
59
60 #define BAR_0   0
61 #define BAR_2   2
62
63 #include "tg3.h"
64
65 /* Functions & macros to verify TG3_FLAGS types */
66
67 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
68 {
69         return test_bit(flag, bits);
70 }
71
72 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
73 {
74         set_bit(flag, bits);
75 }
76
77 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79         clear_bit(flag, bits);
80 }
81
/* Convenience wrappers: tg3_flag(tp, FOO) token-pastes the short name
 * into the full TG3_FLAG_FOO enumerator and operates on tp->tg3_flags.
 */
#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
88
/* Driver identity: version string is "<major>.<minor>". */
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     119
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 18, 2011"

/* Hardware mode register defaults used at init time. */
#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default netif message-level mask (see netdevice.h NETIF_MSG_*). */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)
108
109 /* length of time before we decide the hardware is borked,
110  * and dev->tx_timeout() should be called to fix the problem
111  */
112
113 #define TG3_TX_TIMEOUT                  (5 * HZ)
114
115 /* hardware minimum and maximum for a single frame's data payload */
116 #define TG3_MIN_MTU                     60
117 #define TG3_MAX_MTU(tp) \
118         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
119
120 /* These numbers seem to be hard coded in the NIC firmware somehow.
121  * You can't change the ring sizes, but you can change where you place
122  * them in the NIC onboard memory.
123  */
124 #define TG3_RX_STD_RING_SIZE(tp) \
125         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
126          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
127 #define TG3_DEF_RX_RING_PENDING         200
128 #define TG3_RX_JMB_RING_SIZE(tp) \
129         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
130          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
131 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
132 #define TG3_RSS_INDIR_TBL_SIZE          128
133
134 /* Do not place this n-ring entries value into the tp struct itself,
135  * we really want to expose these constants to GCC so that modulo et
136  * al.  operations are done with shifts and masks instead of with
137  * hw multiply/modulo instructions.  Another solution would be to
138  * replace things like '% foo' with '& (foo - 1)'.
139  */
140
141 #define TG3_TX_RING_SIZE                512
142 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
143
144 #define TG3_RX_STD_RING_BYTES(tp) \
145         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
146 #define TG3_RX_JMB_RING_BYTES(tp) \
147         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
148 #define TG3_RX_RCB_RING_BYTES(tp) \
149         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
150 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
151                                  TG3_TX_RING_SIZE)
152 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
153
154 #define TG3_DMA_BYTE_ENAB               64
155
156 #define TG3_RX_STD_DMA_SZ               1536
157 #define TG3_RX_JMB_DMA_SZ               9046
158
159 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
160
161 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
162 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
163
164 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
165         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
166
167 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
168         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
169
170 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
171  * that are at least dword aligned when used in PCIX mode.  The driver
172  * works around this bug by double copying the packet.  This workaround
173  * is built into the normal double copy length check for efficiency.
174  *
175  * However, the double copy is only necessary on those architectures
176  * where unaligned memory accesses are inefficient.  For those architectures
177  * where unaligned memory accesses incur little penalty, we can reintegrate
178  * the 5701 in the normal rx path.  Doing so saves a device structure
179  * dereference by hardcoding the double copy threshold in place.
180  */
181 #define TG3_RX_COPY_THRESHOLD           256
182 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
183         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
184 #else
185         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
186 #endif
187
188 /* minimum number of free TX descriptors required to wake up TX process */
189 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
190
191 #define TG3_RAW_IP_ALIGN 2
192
193 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
194
195 #define FIRMWARE_TG3            "tigon/tg3.bin"
196 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
197 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
198
/* Banner printed at probe time. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

/* Debug message mask, overridable via the "tg3_debug" module parameter. */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
213
/* PCI IDs this driver binds to.  Entries are matched in order by the
 * PCI core; the table must end with an all-zero sentinel.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
300
/* Names reported for "ethtool -S".  NOTE(review): the order appears to
 * mirror the hardware statistics block the driver copies out, so do not
 * reorder entries without checking the stats-gathering code.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
385
386
/* Names reported for the ethtool self-test ("ethtool -t").
 * NOTE(review): order appears to correspond to the self-test dispatch;
 * verify against the test implementation before reordering.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
399
400
401 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
402 {
403         writel(val, tp->regs + off);
404 }
405
406 static u32 tg3_read32(struct tg3 *tp, u32 off)
407 {
408         return readl(tp->regs + off);
409 }
410
411 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
412 {
413         writel(val, tp->aperegs + off);
414 }
415
416 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
417 {
418         return readl(tp->aperegs + off);
419 }
420
421 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
422 {
423         unsigned long flags;
424
425         spin_lock_irqsave(&tp->indirect_lock, flags);
426         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
427         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
428         spin_unlock_irqrestore(&tp->indirect_lock, flags);
429 }
430
431 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
432 {
433         writel(val, tp->regs + off);
434         readl(tp->regs + off);
435 }
436
437 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
438 {
439         unsigned long flags;
440         u32 val;
441
442         spin_lock_irqsave(&tp->indirect_lock, flags);
443         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
444         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
445         spin_unlock_irqrestore(&tp->indirect_lock, flags);
446         return val;
447 }
448
/* Mailbox write for indirect-register mode.  A few mailboxes have
 * dedicated PCI config-space shadow registers and are written directly;
 * everything else goes through the indirection window, offset into the
 * mailbox region at +0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* RX return-ring consumer index: config-space shadow register. */
        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        /* RX standard-ring producer index: config-space shadow register. */
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
478
479 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
480 {
481         unsigned long flags;
482         u32 val;
483
484         spin_lock_irqsave(&tp->indirect_lock, flags);
485         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
486         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
487         spin_unlock_irqrestore(&tp->indirect_lock, flags);
488         return val;
489 }
490
491 /* usec_wait specifies the wait time in usec when writing to certain registers
492  * where it is unsafe to read back the register without some delay.
493  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
494  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
495  */
496 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
497 {
498         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
499                 /* Non-posted methods */
500                 tp->write32(tp, off, val);
501         else {
502                 /* Posted method */
503                 tg3_write32(tp, off, val);
504                 if (usec_wait)
505                         udelay(usec_wait);
506                 tp->read32(tp, off);
507         }
508         /* Wait again after the read for the posted method to guarantee that
509          * the wait time is met.
510          */
511         if (usec_wait)
512                 udelay(usec_wait);
513 }
514
515 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
516 {
517         tp->write32_mbox(tp, off, val);
518         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
519                 tp->read32_mbox(tp, off);
520 }
521
522 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
523 {
524         void __iomem *mbox = tp->regs + off;
525         writel(val, mbox);
526         if (tg3_flag(tp, TXD_MBOX_HWBUG))
527                 writel(val, mbox);
528         if (tg3_flag(tp, MBOX_WRITE_REORDER))
529                 readl(mbox);
530 }
531
532 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
533 {
534         return readl(tp->regs + off + GRCMBOX_BASE);
535 }
536
537 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
538 {
539         writel(val, tp->regs + off + GRCMBOX_BASE);
540 }
541
/* Register/mailbox accessor shorthands.  All expect a local variable
 * named "tp" in the calling scope and dispatch through the per-chip
 * function pointers selected at probe time.  The _f variants flush the
 * write; tw32_wait_f additionally delays for "us" microseconds.
 */
#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
552
/* Write one 32-bit word of NIC on-chip SRAM through the memory window.
 * The window base/data pair is shared, so access is serialized with
 * indirect_lock; the window base is always restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* 5906: the stats-block SRAM range is not writable this way. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                /* Memory window driven via PCI config space. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                /* Memory window driven via MMIO (flushed writes). */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
577
/* Read one 32-bit word of NIC on-chip SRAM through the memory window.
 * Mirrors tg3_write_mem(): serialized with indirect_lock and the window
 * base is restored to zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        /* 5906: the stats-block SRAM range is not readable this way;
         * report zero instead.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                /* Memory window driven via PCI config space. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                /* Memory window driven via MMIO (flushed writes). */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
604
605 static void tg3_ape_lock_init(struct tg3 *tp)
606 {
607         int i;
608         u32 regbase;
609
610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
611                 regbase = TG3_APE_LOCK_GRANT;
612         else
613                 regbase = TG3_APE_PER_LOCK_GRANT;
614
615         /* Make sure the driver hasn't any stale locks. */
616         for (i = 0; i < 8; i++)
617                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
618 }
619
620 static int tg3_ape_lock(struct tg3 *tp, int locknum)
621 {
622         int i, off;
623         int ret = 0;
624         u32 status, req, gnt;
625
626         if (!tg3_flag(tp, ENABLE_APE))
627                 return 0;
628
629         switch (locknum) {
630         case TG3_APE_LOCK_GRC:
631         case TG3_APE_LOCK_MEM:
632                 break;
633         default:
634                 return -EINVAL;
635         }
636
637         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
638                 req = TG3_APE_LOCK_REQ;
639                 gnt = TG3_APE_LOCK_GRANT;
640         } else {
641                 req = TG3_APE_PER_LOCK_REQ;
642                 gnt = TG3_APE_PER_LOCK_GRANT;
643         }
644
645         off = 4 * locknum;
646
647         tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
648
649         /* Wait for up to 1 millisecond to acquire lock. */
650         for (i = 0; i < 100; i++) {
651                 status = tg3_ape_read32(tp, gnt + off);
652                 if (status == APE_LOCK_GRANT_DRIVER)
653                         break;
654                 udelay(10);
655         }
656
657         if (status != APE_LOCK_GRANT_DRIVER) {
658                 /* Revoke the lock request. */
659                 tg3_ape_write32(tp, gnt + off,
660                                 APE_LOCK_GRANT_DRIVER);
661
662                 ret = -EBUSY;
663         }
664
665         return ret;
666 }
667
668 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
669 {
670         u32 gnt;
671
672         if (!tg3_flag(tp, ENABLE_APE))
673                 return;
674
675         switch (locknum) {
676         case TG3_APE_LOCK_GRC:
677         case TG3_APE_LOCK_MEM:
678                 break;
679         default:
680                 return;
681         }
682
683         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
684                 gnt = TG3_APE_LOCK_GRANT;
685         else
686                 gnt = TG3_APE_PER_LOCK_GRANT;
687
688         tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
689 }
690
691 static void tg3_disable_ints(struct tg3 *tp)
692 {
693         int i;
694
695         tw32(TG3PCI_MISC_HOST_CTRL,
696              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
697         for (i = 0; i < tp->irq_max; i++)
698                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
699 }
700
/* Re-enable interrupts on every vector.  Ordering is significant:
 * irq_sync must be visible as zero before the mailboxes are written.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();          /* order irq_sync clear before mailbox writes */

        /* Unmask PCI interrupts at the host-control level. */
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                /* 1-shot MSI mode needs the mailbox written twice. */
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        /* Drop the first two vectors' coal_now bits from the cached mask. */
        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
731
732 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
733 {
734         struct tg3 *tp = tnapi->tp;
735         struct tg3_hw_status *sblk = tnapi->hw_status;
736         unsigned int work_exists = 0;
737
738         /* check for phy events */
739         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
740                 if (sblk->status & SD_STATUS_LINK_CHG)
741                         work_exists = 1;
742         }
743         /* check for RX/TX work to do */
744         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
745             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
746                 work_exists = 1;
747
748         return work_exists;
749 }
750
751 /* tg3_int_reenable
752  *  similar to tg3_enable_ints, but it accurately determines whether there
753  *  is new work pending and can return without flushing the PIO write
754  *  which reenables interrupts
755  */
756 static void tg3_int_reenable(struct tg3_napi *tnapi)
757 {
758         struct tg3 *tp = tnapi->tp;
759
760         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
761         mmiowb();
762
763         /* When doing tagged status, this work check is unnecessary.
764          * The last_tag we write above tells the chip which piece of
765          * work we've completed.
766          */
767         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
768                 tw32(HOSTCC_MODE, tp->coalesce_mode |
769                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
770 }
771
/* Switch the chip's core clock back to the normal source.  The
 * intermediate ALTCLK steps and the 40us waits follow the hardware's
 * required sequencing; do not reorder the writes.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        /* Chips with a CPMU or in the 5780 class manage clocks themselves. */
        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        /* Preserve only the CLKRUN bits and the low divider field. */
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Step down through ALTCLK before settling. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
804
805 #define PHY_BUSY_LOOPS  5000
806
/* Read PHY register @reg through the MAC's MII communication port.
 * On success returns 0 with the 16-bit register value in *val; returns
 * -EBUSY if the MI interface stays busy for PHY_BUSY_LOOPS polls.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* MI autopolling would contend with a manual transaction;
	 * disable it for the duration and restore it below.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register number, read op. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for BUSY to clear, then re-read once more so frame_val
	 * holds the settled data word.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
855
/* Write @val to PHY register @reg through the MAC's MII communication
 * port.  Returns 0 on success (including the FET early-out below) or
 * -EBUSY if the MI interface stays busy for PHY_BUSY_LOOPS polls.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Writes to these two registers are silently dropped on
	 * FET-style PHYs.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Disable MI autopolling while driving the bus manually. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write op. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the transaction to complete. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
904
905 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
906 {
907         int err;
908
909         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
910         if (err)
911                 goto done;
912
913         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
914         if (err)
915                 goto done;
916
917         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
918                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
919         if (err)
920                 goto done;
921
922         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
923
924 done:
925         return err;
926 }
927
928 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
929 {
930         int err;
931
932         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
933         if (err)
934                 goto done;
935
936         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
937         if (err)
938                 goto done;
939
940         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
941                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
942         if (err)
943                 goto done;
944
945         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
946
947 done:
948         return err;
949 }
950
951 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
952 {
953         int err;
954
955         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
956         if (!err)
957                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
958
959         return err;
960 }
961
962 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
963 {
964         int err;
965
966         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
967         if (!err)
968                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
969
970         return err;
971 }
972
973 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
974 {
975         int err;
976
977         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
978                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
979                            MII_TG3_AUXCTL_SHDWSEL_MISC);
980         if (!err)
981                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
982
983         return err;
984 }
985
986 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
987 {
988         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
989                 set |= MII_TG3_AUXCTL_MISC_WREN;
990
991         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
992 }
993
/* Enable SMDSP access through the AUXCTL shadow register (keeping the
 * TX 6dB coding bit set).
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
998
/* Disable SMDSP access (clear SMDSP_ENA, keep TX 6dB coding).
 * Note: no trailing semicolon — the previous definition ended in ';',
 * which made the macro expand to two statements; that breaks use in
 * expression context and in unbraced if/else, and was inconsistent
 * with TG3_PHY_AUXCTL_SMDSP_ENABLE above.
 */
#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1002
/* Soft-reset the PHY via BMCR_RESET and poll until the bit self-clears.
 * Returns 0 on success, -EBUSY if an MDIO access fails or the reset
 * does not complete within ~50ms (5000 polls x 10us).
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit only reaches -1 when the loop ran out without a break. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1033
1034 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1035 {
1036         struct tg3 *tp = bp->priv;
1037         u32 val;
1038
1039         spin_lock_bh(&tp->lock);
1040
1041         if (tg3_readphy(tp, reg, &val))
1042                 val = -EIO;
1043
1044         spin_unlock_bh(&tp->lock);
1045
1046         return val;
1047 }
1048
1049 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1050 {
1051         struct tg3 *tp = bp->priv;
1052         u32 ret = 0;
1053
1054         spin_lock_bh(&tp->lock);
1055
1056         if (tg3_writephy(tp, reg, val))
1057                 ret = -EIO;
1058
1059         spin_unlock_bh(&tp->lock);
1060
1061         return ret;
1062 }
1063
/* mii_bus reset callback: no bus-level reset is needed for this
 * hardware, so always report success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1068
/* Program the 5785 MAC's PHY configuration registers to match the
 * attached PHY model (LED modes) and, for RGMII attachments, the
 * inband/out-of-band status signalling selected by the RGMII_* flags.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Pick LED modes by PHY model; bail out on anything unknown. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII attachments only need LED modes plus the RX/TX
	 * clock timeouts; no inband signalling setup.
	 */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with inband status enabled needs the full mask/enable
	 * bit set in PHYCFG2.
	 */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	/* PHYCFG1: clock timeouts, interrupt routing, and the external
	 * inband RX-decode / TX-status options.
	 */
	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Finally the external RGMII mode register, mirroring the same
	 * external inband RX/TX selections.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1149
/* Disable MI autopolling so the MDIO bus can be driven manually, and
 * refresh the 5785 PHY configuration if the mdio bus is already set up.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1160
/* Determine the PHY address and, when phylib is in use, allocate and
 * register the mdio bus, then apply per-PHY-model interface settings.
 * Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+: PHY address is derived from the PCI function,
		 * offset by 7 when the port is strapped as serdes.
		 */
		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	/* Fill in the mii_bus descriptor; only the one PHY address is
	 * left unmasked, and all PHYs are polled (no interrupts).
	 */
	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply per-PHY-model interface mode and workaround flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1265
1266 static void tg3_mdio_fini(struct tg3 *tp)
1267 {
1268         if (tg3_flag(tp, MDIOBUS_INITED)) {
1269                 tg3_flag_clear(tp, MDIOBUS_INITED);
1270                 mdiobus_unregister(tp->mdio_bus);
1271                 mdiobus_free(tp->mdio_bus);
1272         }
1273 }
1274
1275 /* tp->lock is held. */
1276 static inline void tg3_generate_fw_event(struct tg3 *tp)
1277 {
1278         u32 val;
1279
1280         val = tr32(GRC_RX_CPU_EVENT);
1281         val |= GRC_RX_CPU_DRIVER_EVENT;
1282         tw32_f(GRC_RX_CPU_EVENT, val);
1283
1284         tp->last_event_jiffies = jiffies;
1285 }
1286
1287 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1288
1289 /* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to ack
 * the previous driver event, i.e. for GRC_RX_CPU_DRIVER_EVENT to clear.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8us steps; +1 guarantees at least one iteration. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1315
1316 /* tp->lock is held. */
/* Send a link status update to the management firmware via the
 * NIC_SRAM mailbox.  Only applies to 5780-class chips with ASF
 * enabled.  Caller holds tp->lock.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	/* Make sure the previous event was consumed before reusing
	 * the mailbox.
	 */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* First data word: BMCR in the high half, BMSR in the low.
	 * A failed PHY read leaves that half zero.
	 */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Second word: ADVERTISE / LPA. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Third word: 1000BASE-T control/status, copper PHYs only. */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Fourth word: PHY address register in the high half. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	/* Ring the doorbell to tell the firmware new data is ready. */
	tg3_generate_fw_event(tp);
}
1362
1363 static void tg3_link_report(struct tg3 *tp)
1364 {
1365         if (!netif_carrier_ok(tp->dev)) {
1366                 netif_info(tp, link, tp->dev, "Link is down\n");
1367                 tg3_ump_link_report(tp);
1368         } else if (netif_msg_link(tp)) {
1369                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1370                             (tp->link_config.active_speed == SPEED_1000 ?
1371                              1000 :
1372                              (tp->link_config.active_speed == SPEED_100 ?
1373                               100 : 10)),
1374                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1375                              "full" : "half"));
1376
1377                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1378                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1379                             "on" : "off",
1380                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1381                             "on" : "off");
1382
1383                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1384                         netdev_info(tp->dev, "EEE is %s\n",
1385                                     tp->setlpicnt ? "enabled" : "disabled");
1386
1387                 tg3_ump_link_report(tp);
1388         }
1389 }
1390
1391 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1392 {
1393         u16 miireg;
1394
1395         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1396                 miireg = ADVERTISE_PAUSE_CAP;
1397         else if (flow_ctrl & FLOW_CTRL_TX)
1398                 miireg = ADVERTISE_PAUSE_ASYM;
1399         else if (flow_ctrl & FLOW_CTRL_RX)
1400                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1401         else
1402                 miireg = 0;
1403
1404         return miireg;
1405 }
1406
1407 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1408 {
1409         u16 miireg;
1410
1411         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1412                 miireg = ADVERTISE_1000XPAUSE;
1413         else if (flow_ctrl & FLOW_CTRL_TX)
1414                 miireg = ADVERTISE_1000XPSE_ASYM;
1415         else if (flow_ctrl & FLOW_CTRL_RX)
1416                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1417         else
1418                 miireg = 0;
1419
1420         return miireg;
1421 }
1422
1423 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1424 {
1425         u8 cap = 0;
1426
1427         if (lcladv & ADVERTISE_1000XPAUSE) {
1428                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1429                         if (rmtadv & LPA_1000XPAUSE)
1430                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1431                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1432                                 cap = FLOW_CTRL_RX;
1433                 } else {
1434                         if (rmtadv & LPA_1000XPAUSE)
1435                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1436                 }
1437         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1438                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1439                         cap = FLOW_CTRL_TX;
1440         }
1441
1442         return cap;
1443 }
1444
/* Resolve the active flow-control configuration from the local and
 * remote advertisements (or the forced settings when autoneg is off)
 * and program MAC_RX_MODE / MAC_TX_MODE accordingly.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links resolve via 1000BASE-X pause bits;
		 * copper via the generic MII helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the hardware when something actually changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1483
/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init()).  Mirrors the PHY's negotiated state into the MAC
 * mode, MI status, flow-control and TX length registers, and emits a
 * link message when anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select the MAC port mode for the negotiated speed;
		 * on 5785, sub-gigabit speeds use MII mode.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build local/remote pause
			 * advertisements for flow-control resolution.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000 Mbps half duplex gets a larger slot time. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Decide whether anything user-visible changed. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report after dropping the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1567
/* Connect the MAC to its PHY via phylib: reset the PHY to a known
 * state, attach with tg3_adjust_link() as the link-change handler, and
 * mask the advertised features down to what the MAC supports.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface mode: undo the attach. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1615
/* (Re)start the phylib state machine for the connected PHY.  If the
 * device was in low-power mode, first restore the saved link
 * configuration, then kick off autonegotiation.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1637
1638 static void tg3_phy_stop(struct tg3 *tp)
1639 {
1640         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1641                 return;
1642
1643         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1644 }
1645
1646 static void tg3_phy_fini(struct tg3 *tp)
1647 {
1648         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1649                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1650                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1651         }
1652 }
1653
/* Enable or disable Auto Power-Down (APD) on FET-style internal PHYs.
 * The APD control bit lives in a shadow register bank that is visible
 * only while MII_TG3_FET_SHADOW_EN is set in MII_TG3_FET_TEST, so the
 * sequence is order-critical: open the shadow bank, read-modify-write
 * AUXSTAT2, then restore the original TEST register value.
 */
1654 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1655 {
1656         u32 phytest;
1657
1658         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1659                 u32 phy;
1660
                     /* Expose the shadow register bank. */
1661                 tg3_writephy(tp, MII_TG3_FET_TEST,
1662                              phytest | MII_TG3_FET_SHADOW_EN);
1663                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1664                         if (enable)
1665                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1666                         else
1667                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1668                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1669                 }
                     /* Hide the shadow bank again. */
1670                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1671         }
1672 }
1673
/* Enable or disable the PHY's Auto Power-Down feature.  No-op on
 * pre-5705 chips and on 5717+ parts using an MII serdes PHY.  FET-style
 * internal PHYs are handled by tg3_phy_fet_toggle_apd(); other PHYs are
 * programmed through two writes to the MISC shadow register: first the
 * SCR5 power-savings selector, then the APD selector with an 84 ms
 * wake timer.
 */
1674 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1675 {
1676         u32 reg;
1677
1678         if (!tg3_flag(tp, 5705_PLUS) ||
1679             (tg3_flag(tp, 5717_PLUS) &&
1680              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1681                 return;
1682
1683         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1684                 tg3_phy_fet_toggle_apd(tp, enable);
1685                 return;
1686         }
1687
1688         reg = MII_TG3_MISC_SHDW_WREN |
1689               MII_TG3_MISC_SHDW_SCR5_SEL |
1690               MII_TG3_MISC_SHDW_SCR5_LPED |
1691               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1692               MII_TG3_MISC_SHDW_SCR5_SDTL |
1693               MII_TG3_MISC_SHDW_SCR5_C125OE;
                 /* DLL APD stays on except when enabling APD on 5784. */
1694         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1695                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1696
1697         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1698
1699
1700         reg = MII_TG3_MISC_SHDW_WREN |
1701               MII_TG3_MISC_SHDW_APD_SEL |
1702               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1703         if (enable)
1704                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1705
1706         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1707 }
1708
/* Enable or disable automatic MDI/MDI-X crossover detection.  No-op on
 * pre-5705 chips and on any serdes PHY.  FET-style PHYs keep the MDIX
 * bit in the MISCCTRL shadow register (bracketed by the FET_TEST
 * shadow-enable sequence); other PHYs use the MISC auxctl shadow
 * register via the auxctl read/write helpers.
 */
1709 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1710 {
1711         u32 phy;
1712
1713         if (!tg3_flag(tp, 5705_PLUS) ||
1714             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1715                 return;
1716
1717         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1718                 u32 ephy;
1719
1720                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1721                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1722
                             /* Open the shadow bank, RMW MISCCTRL, close it. */
1723                         tg3_writephy(tp, MII_TG3_FET_TEST,
1724                                      ephy | MII_TG3_FET_SHADOW_EN);
1725                         if (!tg3_readphy(tp, reg, &phy)) {
1726                                 if (enable)
1727                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1728                                 else
1729                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1730                                 tg3_writephy(tp, reg, phy);
1731                         }
1732                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1733                 }
1734         } else {
1735                 int ret;
1736
1737                 ret = tg3_phy_auxctl_read(tp,
1738                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1739                 if (!ret) {
1740                         if (enable)
1741                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1742                         else
1743                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1744                         tg3_phy_auxctl_write(tp,
1745                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1746                 }
1747         }
1748 }
1749
1750 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1751 {
1752         int ret;
1753         u32 val;
1754
1755         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1756                 return;
1757
1758         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1759         if (!ret)
1760                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1761                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1762 }
1763
/* Program PHY DSP tuning coefficients from the one-time-programmable
 * (OTP) fuse word cached in tp->phy_otp.  Each field is extracted from
 * the packed word and written to its DSP register; all writes are
 * bracketed by the SMDSP enable/disable auxctl sequence.
 */
1764 static void tg3_phy_apply_otp(struct tg3 *tp)
1765 {
1766         u32 otp, phy;
1767
1768         if (!tp->phy_otp)
1769                 return;
1770
1771         otp = tp->phy_otp;
1772
                 /* Nonzero means the SMDSP gate could not be opened; bail. */
1773         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1774                 return;
1775
1776         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1777         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1778         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1779
1780         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1781               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1782         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1783
1784         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1785         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1786         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1787
1788         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1789         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1790
1791         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1792         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1793
1794         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1795               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1796         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1797
1798         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1799 }
1800
/* Re-evaluate Energy Efficient Ethernet LPI state after a link change.
 * If the link is up via autoneg at 100/1000 full duplex, program the
 * CPMU LPI exit timer for the current speed and, when the clause-45
 * EEE resolution status shows the link partner agreed to EEE, arm
 * tp->setlpicnt so a later timer tick enables LPI.  Otherwise LPI is
 * switched off in the CPMU immediately.
 */
1801 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1802 {
1803         u32 val;
1804
1805         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1806                 return;
1807
1808         tp->setlpicnt = 0;
1809
1810         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1811             current_link_up == 1 &&
1812             tp->link_config.active_duplex == DUPLEX_FULL &&
1813             (tp->link_config.active_speed == SPEED_100 ||
1814              tp->link_config.active_speed == SPEED_1000)) {
1815                 u32 eeectl;
1816
                         /* LPI exit latency depends on link speed. */
1817                 if (tp->link_config.active_speed == SPEED_1000)
1818                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1819                 else
1820                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1821
1822                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1823
1824                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1825                                   TG3_CL45_D7_EEERES_STAT, &val);
1826
1827                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1828                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1829                         tp->setlpicnt = 2;
1830         }
1831
1832         if (!tp->setlpicnt) {
1833                 val = tr32(TG3_CPMU_EEE_MODE);
1834                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1835         }
1836 }
1837
/* Turn on EEE low-power-idle in the CPMU.  At gigabit speed on
 * 5717/5719/57765 a DSP TAP26 write must precede enabling LPI; the
 * write is bracketed by the SMDSP enable/disable sequence.
 */
1838 static void tg3_phy_eee_enable(struct tg3 *tp)
1839 {
1840         u32 val;
1841
1842         if (tp->link_config.active_speed == SPEED_1000 &&
1843             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1844              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1845              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1846             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1847                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1848                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1849         }
1850
1851         val = tr32(TG3_CPMU_EEE_MODE);
1852         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1853 }
1854
1855 static int tg3_wait_macro_done(struct tg3 *tp)
1856 {
1857         int limit = 100;
1858
1859         while (limit--) {
1860                 u32 tmp32;
1861
1862                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1863                         if ((tmp32 & 0x1000) == 0)
1864                                 break;
1865                 }
1866         }
1867         if (limit < 0)
1868                 return -EBUSY;
1869
1870         return 0;
1871 }
1872
/* Write a known test pattern into each of the four DSP channel blocks
 * and read it back through the macro interface.  On a macro timeout
 * the caller is told to reset the PHY again via *resetp; on a data
 * mismatch a DSP recovery sequence is issued.  Returns 0 if all four
 * channels verify, -EBUSY otherwise.
 */
1873 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1874 {
1875         static const u32 test_pat[4][6] = {
1876         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1877         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1878         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1879         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1880         };
1881         int chan;
1882
1883         for (chan = 0; chan < 4; chan++) {
1884                 int i;
1885
                         /* Select this channel block and start a write macro. */
1886                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1887                              (chan * 0x2000) | 0x0200);
1888                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1889
1890                 for (i = 0; i < 6; i++)
1891                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1892                                      test_pat[chan][i]);
1893
1894                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1895                 if (tg3_wait_macro_done(tp)) {
1896                         *resetp = 1;
1897                         return -EBUSY;
1898                 }
1899
                         /* Re-select the block and start a read-back macro. */
1900                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1901                              (chan * 0x2000) | 0x0200);
1902                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1903                 if (tg3_wait_macro_done(tp)) {
1904                         *resetp = 1;
1905                         return -EBUSY;
1906                 }
1907
1908                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1909                 if (tg3_wait_macro_done(tp)) {
1910                         *resetp = 1;
1911                         return -EBUSY;
1912                 }
1913
                         /* Words come back as (low, high) pairs. */
1914                 for (i = 0; i < 6; i += 2) {
1915                         u32 low, high;
1916
1917                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1918                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1919                             tg3_wait_macro_done(tp)) {
1920                                 *resetp = 1;
1921                                 return -EBUSY;
1922                         }
1923                         low &= 0x7fff;
1924                         high &= 0x000f;
1925                         if (low != test_pat[chan][i] ||
1926                             high != test_pat[chan][i+1]) {
                                         /* Mismatch: issue DSP recovery writes. */
1927                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1928                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1929                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1930
1931                                 return -EBUSY;
1932                         }
1933                 }
1934         }
1935
1936         return 0;
1937 }
1938
/* Clear the DSP test pattern: write zeros into all six words of each of
 * the four channel blocks and commit with the write macro.  Returns
 * -EBUSY if any macro operation times out.
 */
1939 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1940 {
1941         int chan;
1942
1943         for (chan = 0; chan < 4; chan++) {
1944                 int i;
1945
1946                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1947                              (chan * 0x2000) | 0x0200);
1948                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1949                 for (i = 0; i < 6; i++)
1950                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1951                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1952                 if (tg3_wait_macro_done(tp))
1953                         return -EBUSY;
1954         }
1955
1956         return 0;
1957 }
1958
/* DSP-based PHY reset workaround for 5703/5704/5705.  Up to ten
 * attempts: reset the PHY (only when the previous attempt asked for
 * it), disable the transmitter, force 1000 Mb/s full-duplex master
 * mode, then verify the DSP with the channel test pattern.  Afterwards
 * the pattern is cleared and the original register state is restored.
 *
 * NOTE(review): if tg3_readphy() keeps failing, the loop exits via
 * `continue` with `err` still holding the value of the last successful
 * call (possibly 0) and phy9_orig possibly never set -- presumably
 * tolerated because MDIO reads rarely fail here; confirm before
 * relying on this error path.
 */
1959 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1960 {
1961         u32 reg32, phy9_orig;
1962         int retries, do_phy_reset, err;
1963
1964         retries = 10;
1965         do_phy_reset = 1;
1966         do {
1967                 if (do_phy_reset) {
1968                         err = tg3_bmcr_reset(tp);
1969                         if (err)
1970                                 return err;
1971                         do_phy_reset = 0;
1972                 }
1973
1974                 /* Disable transmitter and interrupt.  */
1975                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1976                         continue;
1977
1978                 reg32 |= 0x3000;
1979                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1980
1981                 /* Set full-duplex, 1000 mbps.  */
1982                 tg3_writephy(tp, MII_BMCR,
1983                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1984
1985                 /* Set to master mode.  */
1986                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1987                         continue;
1988
1989                 tg3_writephy(tp, MII_TG3_CTRL,
1990                              (MII_TG3_CTRL_AS_MASTER |
1991                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1992
1993                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1994                 if (err)
1995                         return err;
1996
1997                 /* Block the PHY control access.  */
1998                 tg3_phydsp_write(tp, 0x8005, 0x0800);
1999
2000                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2001                 if (!err)
2002                         break;
2003         } while (--retries);
2004
2005         err = tg3_phy_reset_chanpat(tp);
2006         if (err)
2007                 return err;
2008
                 /* Unblock PHY control access and leave DSP address at reset. */
2009         tg3_phydsp_write(tp, 0x8005, 0x0000);
2010
2011         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2012         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2013
2014         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2015
2016         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2017
                 /* Re-enable transmitter and interrupt. */
2018         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2019                 reg32 &= ~0x3000;
2020                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2021         } else if (!err)
2022                 err = -EBUSY;
2023
2024         return err;
2025 }
2026
2027 /* Reset the tigon3 PHY and reapply every chip-revision-specific PHY
2028  * workaround (DSP fixups, APD, auto-MDIX, wirespeed, jumbo tuning).
2029  * Returns 0 on success or a negative errno.
2030  */
2031 static int tg3_phy_reset(struct tg3 *tp)
2032 {
2033         u32 val, cpmuctrl;
2034         int err;
2035
                 /* 5906: bring the internal PHY out of IDDQ power-down first. */
2036         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2037                 val = tr32(GRC_MISC_CFG);
2038                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2039                 udelay(40);
2040         }
                 /* BMSR is latched; read twice to get current link status. */
2041         err  = tg3_readphy(tp, MII_BMSR, &val);
2042         err |= tg3_readphy(tp, MII_BMSR, &val);
2043         if (err != 0)
2044                 return -EBUSY;
2045
2046         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2047                 netif_carrier_off(tp->dev);
2048                 tg3_link_report(tp);
2049         }
2050
2051         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2052             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2053             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2054                 err = tg3_phy_reset_5703_4_5(tp);
2055                 if (err)
2056                         return err;
2057                 goto out;
2058         }
2059
                 /* 5784 (non-AX): temporarily drop GPHY 10MB-RX-only mode
                  * around the BMCR reset, restoring it afterwards.
                  */
2060         cpmuctrl = 0;
2061         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2062             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2063                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2064                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2065                         tw32(TG3_CPMU_CTRL,
2066                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2067         }
2068
2069         err = tg3_bmcr_reset(tp);
2070         if (err)
2071                 return err;
2072
2073         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2074                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2075                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2076
2077                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2078         }
2079
                 /* 5784-AX/5761-AX: undo the 12.5 MHz MAC clock selection. */
2080         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2081             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2082                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2083                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2084                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2085                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2086                         udelay(40);
2087                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2088                 }
2089         }
2090
2091         if (tg3_flag(tp, 5717_PLUS) &&
2092             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2093                 return 0;
2094
2095         tg3_phy_apply_otp(tp);
2096
2097         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2098                 tg3_phy_toggle_apd(tp, true);
2099         else
2100                 tg3_phy_toggle_apd(tp, false);
2101
2102 out:
                 /* Per-erratum DSP fixups, keyed off the phy_flags set at probe. */
2103         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2104             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2105                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2106                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2107                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2108         }
2109
2110         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2111                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2112                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113         }
2114
2115         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2116                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2117                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2118                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2119                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2120                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2121                 }
2122         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2123                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2124                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2125                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2126                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2127                                 tg3_writephy(tp, MII_TG3_TEST1,
2128                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2129                         } else
2130                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2131
2132                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2133                 }
2134         }
2135
2136         /* Set Extended packet length bit (bit 14) on all chips that */
2137         /* support jumbo frames */
2138         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2139                 /* Cannot do read-modify-write on 5401 */
2140                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2141         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2142                 /* Set bit 14 with read-modify-write to preserve other bits */
2143                 err = tg3_phy_auxctl_read(tp,
2144                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2145                 if (!err)
2146                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2147                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2148         }
2149
2150         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2151          * jumbo frames transmission.
2152          */
2153         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2154                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2155                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2156                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2157         }
2158
2159         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2160                 /* adjust output voltage */
2161                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2162         }
2163
2164         tg3_phy_toggle_automdix(tp, 1);
2165         tg3_phy_set_wirespeed(tp);
2166         return 0;
2167 }
2167
/* Sequence the GPIO-controlled auxiliary power (Vaux) supply.  Vaux
 * must be kept available when this port -- or, on dual-port boards, the
 * peer port -- needs power after shutdown for Wake-on-LAN or ASF
 * management firmware.  The GPIO patterns and their ordering are
 * board/chip specific; each tw32_wait_f() posts the write and delays.
 */
2168 static void tg3_frob_aux_power(struct tg3 *tp)
2169 {
2170         bool need_vaux = false;
2171
2172         /* The GPIOs do something completely different on 57765. */
2173         if (!tg3_flag(tp, IS_NIC) ||
2174             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2175             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2176                 return;
2177
                 /* Dual-port chips: check whether the peer function still
                  * needs power before touching the shared GPIOs.
                  */
2178         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2179              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2180              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2181              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2182             tp->pdev_peer != tp->pdev) {
2183                 struct net_device *dev_peer;
2184
2185                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2186
2187                 /* remove_one() may have been run on the peer. */
2188                 if (dev_peer) {
2189                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2190
                                 /* Peer is still fully up: leave GPIOs alone. */
2191                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2192                                 return;
2193
2194                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2195                             tg3_flag(tp_peer, ENABLE_ASF))
2196                                 need_vaux = true;
2197                 }
2198         }
2199
2200         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2201                 need_vaux = true;
2202
2203         if (need_vaux) {
2204                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2205                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2206                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2207                                     (GRC_LCLCTRL_GPIO_OE0 |
2208                                      GRC_LCLCTRL_GPIO_OE1 |
2209                                      GRC_LCLCTRL_GPIO_OE2 |
2210                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2211                                      GRC_LCLCTRL_GPIO_OUTPUT1),
2212                                     100);
2213                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2214                            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2215                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2216                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2217                                              GRC_LCLCTRL_GPIO_OE1 |
2218                                              GRC_LCLCTRL_GPIO_OE2 |
2219                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2220                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
2221                                              tp->grc_local_ctrl;
2222                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2223
2224                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2225                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2226
2227                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2228                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2229                 } else {
2230                         u32 no_gpio2;
2231                         u32 grc_local_ctrl = 0;
2232
2233                         /* Workaround to prevent overdrawing Amps. */
2234                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2235                             ASIC_REV_5714) {
2236                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2237                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2238                                             grc_local_ctrl, 100);
2239                         }
2240
2241                         /* On 5753 and variants, GPIO2 cannot be used. */
2242                         no_gpio2 = tp->nic_sram_data_cfg &
2243                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
2244
2245                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2246                                          GRC_LCLCTRL_GPIO_OE1 |
2247                                          GRC_LCLCTRL_GPIO_OE2 |
2248                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
2249                                          GRC_LCLCTRL_GPIO_OUTPUT2;
2250                         if (no_gpio2) {
2251                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2252                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
2253                         }
2254                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2255                                                     grc_local_ctrl, 100);
2256
2257                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2258
2259                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2260                                                     grc_local_ctrl, 100);
2261
2262                         if (!no_gpio2) {
2263                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2264                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2265                                             grc_local_ctrl, 100);
2266                         }
2267                 }
2268         } else {
                         /* Vaux not needed: pulse GPIO1 to switch it off. */
2269                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2270                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2271                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2272                                     (GRC_LCLCTRL_GPIO_OE1 |
2273                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2274
2275                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2276                                     GRC_LCLCTRL_GPIO_OE1, 100);
2277
2278                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2279                                     (GRC_LCLCTRL_GPIO_OE1 |
2280                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2281                 }
2282         }
2283 }
2284
2285 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2286 {
2287         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2288                 return 1;
2289         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2290                 if (speed != SPEED_10)
2291                         return 1;
2292         } else if (speed == SPEED_10)
2293                 return 1;
2294
2295         return 0;
2296 }
2297
2298 static int tg3_setup_phy(struct tg3 *, int);
2299
2300 #define RESET_KIND_SHUTDOWN     0
2301 #define RESET_KIND_INIT         1
2302 #define RESET_KIND_SUSPEND      2
2303
2304 static void tg3_write_sig_post_reset(struct tg3 *, int);
2305 static int tg3_halt_cpu(struct tg3 *, u32);
2306
/* Put the PHY into its lowest safe power state before suspend or
 * power-down.  Serdes and FET-style PHYs use dedicated sequences, and
 * several chip revisions must NOT have the PHY powered down at all
 * because of hardware bugs -- those paths return before the final
 * BMCR_PDOWN write.
 */
2307 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2308 {
2309         u32 val;
2310
2311         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2312                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2313                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2314                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2315
2316                         sg_dig_ctrl |=
2317                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2318                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2319                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2320                 }
2321                 return;
2322         }
2323
                 /* 5906: park the internal PHY in IDDQ instead of BMCR_PDOWN. */
2324         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2325                 tg3_bmcr_reset(tp);
2326                 val = tr32(GRC_MISC_CFG);
2327                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2328                 udelay(40);
2329                 return;
2330         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2331                 u32 phytest;
2332                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2333                         u32 phy;
2334
                                 /* Stop advertising and restart aneg so the link
                                  * drops, then set standby power-down via the
                                  * shadow AUXMODE4 register.
                                  */
2335                         tg3_writephy(tp, MII_ADVERTISE, 0);
2336                         tg3_writephy(tp, MII_BMCR,
2337                                      BMCR_ANENABLE | BMCR_ANRESTART);
2338
2339                         tg3_writephy(tp, MII_TG3_FET_TEST,
2340                                      phytest | MII_TG3_FET_SHADOW_EN);
2341                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2342                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2343                                 tg3_writephy(tp,
2344                                              MII_TG3_FET_SHDW_AUXMODE4,
2345                                              phy);
2346                         }
2347                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2348                 }
2349                 return;
2350         } else if (do_low_power) {
2351                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2352                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2353
2354                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2355                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2356                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2357                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2358         }
2359
2360         /* The PHY should not be powered down on some chips because
2361          * of bugs.
2362          */
2363         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2364             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2365             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2366              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2367                 return;
2368
                 /* 5784-AX/5761-AX: force the 12.5 MHz MAC clock first. */
2369         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2370             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2371                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2372                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2373                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2374                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2375         }
2376
2377         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2378 }
2379
2380 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration semaphore (recursive: the
 * hardware request is made only on the first acquisition; nesting is
 * tracked in tp->nvram_lock_cnt).  Polls the grant bit for up to
 * 8000 * 20 us; on timeout the request is withdrawn and -ENODEV is
 * returned.  Chips without a NVRAM arbiter succeed trivially.
 */
2381 static int tg3_nvram_lock(struct tg3 *tp)
2382 {
2383         if (tg3_flag(tp, NVRAM)) {
2384                 int i;
2385
2386                 if (tp->nvram_lock_cnt == 0) {
2387                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2388                         for (i = 0; i < 8000; i++) {
2389                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2390                                         break;
2391                                 udelay(20);
2392                         }
2393                         if (i == 8000) {
                                         /* Timed out: release our request. */
2394                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2395                                 return -ENODEV;
2396                         }
2397                 }
2398                 tp->nvram_lock_cnt++;
2399         }
2400         return 0;
2401 }
2402
2403 /* tp->lock is held. */
2404 static void tg3_nvram_unlock(struct tg3 *tp)
2405 {
2406         if (tg3_flag(tp, NVRAM)) {
2407                 if (tp->nvram_lock_cnt > 0)
2408                         tp->nvram_lock_cnt--;
2409                 if (tp->nvram_lock_cnt == 0)
2410                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2411         }
2412 }
2413
2414 /* tp->lock is held. */
2415 static void tg3_enable_nvram_access(struct tg3 *tp)
2416 {
2417         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2418                 u32 nvaccess = tr32(NVRAM_ACCESS);
2419
2420                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2421         }
2422 }
2423
2424 /* tp->lock is held. */
2425 static void tg3_disable_nvram_access(struct tg3 *tp)
2426 {
2427         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2428                 u32 nvaccess = tr32(NVRAM_ACCESS);
2429
2430                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2431         }
2432 }
2433
/* Read one 32-bit word from the legacy serial EEPROM via the GRC
 * EEPROM address/data registers (used when the NVRAM flag is not set).
 *
 * @offset: byte offset into the EEPROM; must be 32-bit aligned and
 *          fit in the address field of the register.
 * @val:    filled with the word read, converted to native endianness.
 *
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY if the
 * EEPROM transaction does not complete within ~1 second.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve register bits outside the address/devid/read fields. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	/* Kick off a read transaction at device id 0. */
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to 1000 x 1ms. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2473
2474 #define NVRAM_CMD_TIMEOUT 10000
2475
2476 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2477 {
2478         int i;
2479
2480         tw32(NVRAM_CMD, nvram_cmd);
2481         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2482                 udelay(10);
2483                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2484                         udelay(10);
2485                         break;
2486                 }
2487         }
2488
2489         if (i == NVRAM_CMD_TIMEOUT)
2490                 return -EBUSY;
2491
2492         return 0;
2493 }
2494
2495 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2496 {
2497         if (tg3_flag(tp, NVRAM) &&
2498             tg3_flag(tp, NVRAM_BUFFERED) &&
2499             tg3_flag(tp, FLASH) &&
2500             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2501             (tp->nvram_jedecnum == JEDEC_ATMEL))
2502
2503                 addr = ((addr / tp->nvram_pagesize) <<
2504                         ATMEL_AT45DB0X1B_PAGE_POS) +
2505                        (addr % tp->nvram_pagesize);
2506
2507         return addr;
2508 }
2509
2510 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2511 {
2512         if (tg3_flag(tp, NVRAM) &&
2513             tg3_flag(tp, NVRAM_BUFFERED) &&
2514             tg3_flag(tp, FLASH) &&
2515             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2516             (tp->nvram_jedecnum == JEDEC_ATMEL))
2517
2518                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2519                         tp->nvram_pagesize) +
2520                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2521
2522         return addr;
2523 }
2524
2525 /* NOTE: Data read in from NVRAM is byteswapped according to
2526  * the byteswapping settings for all other register accesses.
2527  * tg3 devices are BE devices, so on a BE machine, the data
2528  * returned will be exactly as it is seen in NVRAM.  On a LE
2529  * machine, the 32-bit value will be byteswapped.
2530  */
2531 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2532 {
2533         int ret;
2534
2535         if (!tg3_flag(tp, NVRAM))
2536                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2537
2538         offset = tg3_nvram_phys_addr(tp, offset);
2539
2540         if (offset > NVRAM_ADDR_MSK)
2541                 return -EINVAL;
2542
2543         ret = tg3_nvram_lock(tp);
2544         if (ret)
2545                 return ret;
2546
2547         tg3_enable_nvram_access(tp);
2548
2549         tw32(NVRAM_ADDR, offset);
2550         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2551                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2552
2553         if (ret == 0)
2554                 *val = tr32(NVRAM_RDDATA);
2555
2556         tg3_disable_nvram_access(tp);
2557
2558         tg3_nvram_unlock(tp);
2559
2560         return ret;
2561 }
2562
2563 /* Ensures NVRAM data is in bytestream format. */
2564 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2565 {
2566         u32 v;
2567         int res = tg3_nvram_read(tp, offset, &v);
2568         if (!res)
2569                 *val = cpu_to_be32(v);
2570         return res;
2571 }
2572
2573 /* tp->lock is held. */
2574 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2575 {
2576         u32 addr_high, addr_low;
2577         int i;
2578
2579         addr_high = ((tp->dev->dev_addr[0] << 8) |
2580                      tp->dev->dev_addr[1]);
2581         addr_low = ((tp->dev->dev_addr[2] << 24) |
2582                     (tp->dev->dev_addr[3] << 16) |
2583                     (tp->dev->dev_addr[4] <<  8) |
2584                     (tp->dev->dev_addr[5] <<  0));
2585         for (i = 0; i < 4; i++) {
2586                 if (i == 1 && skip_mac_1)
2587                         continue;
2588                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2589                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2590         }
2591
2592         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2594                 for (i = 0; i < 12; i++) {
2595                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2596                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2597                 }
2598         }
2599
2600         addr_high = (tp->dev->dev_addr[0] +
2601                      tp->dev->dev_addr[1] +
2602                      tp->dev->dev_addr[2] +
2603                      tp->dev->dev_addr[3] +
2604                      tp->dev->dev_addr[4] +
2605                      tp->dev->dev_addr[5]) &
2606                 TX_BACKOFF_SEED_MASK;
2607         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2608 }
2609
/* Re-write the saved MISC_HOST_CTRL value into PCI config space so
 * that subsequent register accesses (indirect or otherwise) work.
 * Called first thing on the power-up/power-down paths, before any
 * tr32/tw32 use.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
2619
2620 static int tg3_power_up(struct tg3 *tp)
2621 {
2622         tg3_enable_register_access(tp);
2623
2624         pci_set_power_state(tp->pdev, PCI_D0);
2625
2626         /* Switch out of Vaux if it is a NIC */
2627         if (tg3_flag(tp, IS_NIC))
2628                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2629
2630         return 0;
2631 }
2632
/* Quiesce the chip in preparation for entering a low-power state.
 *
 * Saves the current link configuration, renegotiates the link down to
 * a WoL-capable speed if needed, arms the Wake-on-LAN machinery,
 * gates the core clocks, powers down the PHY when nothing needs it,
 * and finally signals shutdown to the management firmware.
 * The exact ordering of the register writes below is significant.
 *
 * Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the chip is being shut down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the link parameters so resume can
			 * restore them.
			 */
			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			/* Advertise only low speeds for low-power
			 * operation; widen if WoL/ASF needs more.
			 */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Certain Broadcom PHY families need the
			 * explicit low-power programming done later.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		/* Drop copper links to 10/half for minimum power. */
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for the firmware mailbox to
		 * signal readiness before proceeding.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		/* Configure the MAC so magic-packet reception still
		 * works while the rest of the chip is asleep.
		 */
		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate the chip clocks; which bits apply depends on the
	 * chip generation.
	 */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step write with settling delays between them. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down when neither WoL nor ASF
	 * needs it.
	 */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
2878
/* Fully power down the device: quiesce the chip and arm WoL state,
 * then let the PCI core enable wake-up and enter D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
2886
2887 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2888 {
2889         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2890         case MII_TG3_AUX_STAT_10HALF:
2891                 *speed = SPEED_10;
2892                 *duplex = DUPLEX_HALF;
2893                 break;
2894
2895         case MII_TG3_AUX_STAT_10FULL:
2896                 *speed = SPEED_10;
2897                 *duplex = DUPLEX_FULL;
2898                 break;
2899
2900         case MII_TG3_AUX_STAT_100HALF:
2901                 *speed = SPEED_100;
2902                 *duplex = DUPLEX_HALF;
2903                 break;
2904
2905         case MII_TG3_AUX_STAT_100FULL:
2906                 *speed = SPEED_100;
2907                 *duplex = DUPLEX_FULL;
2908                 break;
2909
2910         case MII_TG3_AUX_STAT_1000HALF:
2911                 *speed = SPEED_1000;
2912                 *duplex = DUPLEX_HALF;
2913                 break;
2914
2915         case MII_TG3_AUX_STAT_1000FULL:
2916                 *speed = SPEED_1000;
2917                 *duplex = DUPLEX_FULL;
2918                 break;
2919
2920         default:
2921                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2922                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2923                                  SPEED_10;
2924                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2925                                   DUPLEX_HALF;
2926                         break;
2927                 }
2928                 *speed = SPEED_INVALID;
2929                 *duplex = DUPLEX_INVALID;
2930                 break;
2931         }
2932 }
2933
/* Program the PHY autonegotiation advertisement registers.
 *
 * @advertise: ethtool ADVERTISED_* mask of link modes to offer.
 * @flowctrl:  FLOW_CTRL_* mask translated into pause bits.
 *
 * Writes MII_ADVERTISE (10/100 + pause), then MII_TG3_CTRL (gigabit)
 * unless the PHY is 10/100-only, then the EEE advertisement when the
 * PHY supports EEE.  Returns 0 or the first tg3_writephy() error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* Translate ethtool bits into MII_ADVERTISE register bits. */
	new_adv = ADVERTISE_CSMA;
	if (advertise & ADVERTISED_10baseT_Half)
		new_adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		new_adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		new_adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		new_adv |= ADVERTISE_100FULL;

	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	/* Gigabit advertisement lives in the separate 1000BASE-T
	 * control register.
	 */
	new_adv = 0;
	if (advertise & ADVERTISED_1000baseT_Half)
		new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
	if (advertise & ADVERTISED_1000baseT_Full)
		new_adv |= MII_TG3_CTRL_ADV_1000_FULL;

	/* Early 5701 revisions must negotiate as link master. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= (MII_TG3_CTRL_AS_MASTER |
			    MII_TG3_CTRL_ENABLE_AS_MASTER);

	err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI requests while reprogramming EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		/* Chip-specific DSP fixups required before EEE
		 * advertisement takes effect.
		 */
		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
			/* Fall through */
		case ASIC_REV_5719:
			val = MII_TG3_DSP_TAP26_ALNOKO |
			      MII_TG3_DSP_TAP26_RMRXSTO |
			      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		}

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);

		/* Always undo SMDSP enable, but report the first error. */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
3014
/* Kick off link bring-up on a copper PHY.
 *
 * Chooses what to advertise based on the current state: a reduced
 * low-power set when TG3_PHYFLG_IS_LOW_POWER is set, the full
 * configured advertisement when no specific speed is requested, or a
 * single forced mode otherwise.  For forced (non-autoneg) modes the
 * BMCR is programmed directly after waiting for the link to drop.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Low-power: advertise only 10Mb (plus 100Mb when WoL
		 * requires it), with full flow control.
		 */
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise the configured set,
		 * minus gigabit on 10/100-only PHYs.
		 */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the BMCR value for the forced mode. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback and wait
			 * (up to ~15ms) for BMSR to report loss of
			 * link before writing the new BMCR.  BMSR is
			 * read twice because link status is latched.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: (re)start negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3108
3109 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3110 {
3111         int err;
3112
3113         /* Turn off tap power management. */
3114         /* Set Extended packet length bit */
3115         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3116
3117         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3118         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3119         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3120         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3121         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3122
3123         udelay(40);
3124
3125         return err;
3126 }
3127
3128 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3129 {
3130         u32 adv_reg, all_mask = 0;
3131
3132         if (mask & ADVERTISED_10baseT_Half)
3133                 all_mask |= ADVERTISE_10HALF;
3134         if (mask & ADVERTISED_10baseT_Full)
3135                 all_mask |= ADVERTISE_10FULL;
3136         if (mask & ADVERTISED_100baseT_Half)
3137                 all_mask |= ADVERTISE_100HALF;
3138         if (mask & ADVERTISED_100baseT_Full)
3139                 all_mask |= ADVERTISE_100FULL;
3140
3141         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3142                 return 0;
3143
3144         if ((adv_reg & all_mask) != all_mask)
3145                 return 0;
3146         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3147                 u32 tg3_ctrl;
3148
3149                 all_mask = 0;
3150                 if (mask & ADVERTISED_1000baseT_Half)
3151                         all_mask |= ADVERTISE_1000HALF;
3152                 if (mask & ADVERTISED_1000baseT_Full)
3153                         all_mask |= ADVERTISE_1000FULL;
3154
3155                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3156                         return 0;
3157
3158                 if ((tg3_ctrl & all_mask) != all_mask)
3159                         return 0;
3160         }
3161         return 1;
3162 }
3163
3164 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3165 {
3166         u32 curadv, reqadv;
3167
3168         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3169                 return 1;
3170
3171         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3172         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3173
3174         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3175                 if (curadv != reqadv)
3176                         return 0;
3177
3178                 if (tg3_flag(tp, PAUSE_AUTONEG))
3179                         tg3_readphy(tp, MII_LPA, rmtadv);
3180         } else {
3181                 /* Reprogram the advertisement register, even if it
3182                  * does not affect the current link.  If the link
3183                  * gets renegotiated in the future, we can save an
3184                  * additional renegotiation cycle by advertising
3185                  * it correctly in the first place.
3186                  */
3187                 if (curadv != reqadv) {
3188                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3189                                      ADVERTISE_PAUSE_ASYM);
3190                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3191                 }
3192         }
3193
3194         return 1;
3195 }
3196
/* Bring up, or re-evaluate, the link on a copper (10/100/1000BASE-T) PHY.
 *
 * @tp: device state
 * @force_reset: non-zero to unconditionally reset the PHY before probing
 *
 * Probes BMSR/AUX_STAT for link, speed and duplex, validates the
 * negotiated (or forced) parameters against link_config, programs the
 * MAC port mode/duplex to match, applies several chip-specific
 * workarounds, and reports carrier transitions to the network stack.
 *
 * Returns 0, or a negative error from the 5401 PHY DSP init path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Acknowledge any latched MAC status-change bits before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Disable MI auto-polling while we perform manual MDIO accesses. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR is read twice; the first read flushes the latched
		 * link-status bit so the second reflects current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Poll up to ~10ms for the link to come back. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0: if the gigabit link still did not
			 * come up, reset and reload the DSP once more.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	/* Unmask only the link-change interrupt when MI interrupts are
	 * in use; otherwise mask everything (non-FET PHYs).
	 */
	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	/* 5700/5701: program the PHY LED mode. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		/* If bit 10 of the misc-test shadow is clear, set it and
		 * skip straight to renegotiation.
		 */
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll up to ~4ms for link-up (double-read flushes the latch). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait (up to ~20ms) for a non-zero AUX_STAT, which
		 * encodes the resolved speed/duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Re-read BMCR until it returns a stable, sane value
		 * (neither 0 nor all-ones-minus-reset).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg mode: only accept the link if the PHY is
			 * autonegotiating and advertising everything the
			 * configuration asks for, with acceptable pause bits.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: speed, duplex and flow control must
			 * all match the requested configuration exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	/* No usable link (or low-power exit): kick off negotiation and
	 * re-check once; internal loopback always counts as link-up.
	 */
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program MAC port mode (MII for 10/100, GMII otherwise). */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	/* 5700: link polarity depends on chip/speed combination. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: re-ack status
	 * changes and post a magic value to the firmware mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		/* CLKREQ must be off at 10/100, on at gigabit. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	/* Propagate carrier transitions to the network stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
3475
/* State for the software fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine).  One instance lives on the stack of
 * fiber_autoneg() for the duration of a negotiation attempt.
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;		/* MR_* control bits plus latched LP abilities */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters: cur_time advances once per state-machine call;
	 * link_time records the tick of the last state transition that
	 * starts a settle period.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* rx config word being debounced */
	int ability_match_count;	/* consecutive ticks it was unchanged */

	/* Match flags derived from the received config word each tick. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* config words being sent / last received */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0	/* keep ticking the state machine */
#define ANEG_DONE       1	/* negotiation finished */
#define ANEG_TIMER_ENAB 2	/* keep ticking; settle timer armed */
#define ANEG_FAILED     -1	/* negotiation failed */

/* Ticks a state must hold before the machine considers it settled. */
#define ANEG_STATE_SETTLE_TIME  10000
3539
/* Run one tick of the software fiber autonegotiation state machine.
 *
 * @tp: device state
 * @ap: per-negotiation state, zeroed by the caller before the first tick
 *
 * Each call samples the MAC's received config word, updates the
 * ability/ack/idle match tracking, then advances ap->state one step.
 * Driven repeatedly by fiber_autoneg() with a ~1us delay per tick.
 *
 * Returns ANEG_OK or ANEG_TIMER_ENAB to continue ticking, ANEG_DONE
 * when negotiation completes, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First tick: clear all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* Debounce: ability_match is asserted only after the same
		 * config word has been seen on more than one consecutive
		 * tick.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: the partner is sending idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Restart negotiation from scratch. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Begin transmitting an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold here until the settle time has elapsed. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus the configured pause bits. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-zero config word from the partner. */
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Set the ACK bit in our transmitted config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Proceed only if the partner's ability word (ACK
			 * bit masked off) is unchanged; otherwise restart.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner dropped back to sending zeros: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Fail on config words with ANEG_CFG_INVAL bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the partner's advertised abilities into flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		/* Partner restarted (stable zeros): renegotiate. */
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is unimplemented below;
				 * succeed only if neither side wants one.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words; wait for idle from partner. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3791
3792 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3793 {
3794         int res = 0;
3795         struct tg3_fiber_aneginfo aninfo;
3796         int status = ANEG_FAILED;
3797         unsigned int tick;
3798         u32 tmp;
3799
3800         tw32_f(MAC_TX_AUTO_NEG, 0);
3801
3802         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3803         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3804         udelay(40);
3805
3806         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3807         udelay(40);
3808
3809         memset(&aninfo, 0, sizeof(aninfo));
3810         aninfo.flags |= MR_AN_ENABLE;
3811         aninfo.state = ANEG_STATE_UNKNOWN;
3812         aninfo.cur_time = 0;
3813         tick = 0;
3814         while (++tick < 195000) {
3815                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3816                 if (status == ANEG_DONE || status == ANEG_FAILED)
3817                         break;
3818
3819                 udelay(1);
3820         }
3821
3822         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3823         tw32_f(MAC_MODE, tp->mac_mode);
3824         udelay(40);
3825
3826         *txflags = aninfo.txconfig;
3827         *rxflags = aninfo.flags;
3828
3829         if (status == ANEG_DONE &&
3830             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3831                              MR_LP_ADV_FULL_DUPLEX)))
3832                 res = 1;
3833
3834         return res;
3835 }
3836
/* Initialize the BCM8002 SerDes PHY with its fixed bring-up sequence:
 * software reset, PLL lock range, auto-lock/comdet setup and a POR
 * pulse, using raw register numbers.  Runs only on the first init or
 * when the PCS currently reports sync.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	/* Toggle auto-lock off and back on around the POR. */
	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3886
3887 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3888 {
3889         u16 flowctrl;
3890         u32 sg_dig_ctrl, sg_dig_status;
3891         u32 serdes_cfg, expected_sg_dig_ctrl;
3892         int workaround, port_a;
3893         int current_link_up;
3894
3895         serdes_cfg = 0;
3896         expected_sg_dig_ctrl = 0;
3897         workaround = 0;
3898         port_a = 1;
3899         current_link_up = 0;
3900
3901         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3902             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3903                 workaround = 1;
3904                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3905                         port_a = 0;
3906
3907                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3908                 /* preserve bits 20-23 for voltage regulator */
3909                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3910         }
3911
3912         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3913
3914         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3915                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3916                         if (workaround) {
3917                                 u32 val = serdes_cfg;
3918
3919                                 if (port_a)
3920                                         val |= 0xc010000;
3921                                 else
3922                                         val |= 0x4010000;
3923                                 tw32_f(MAC_SERDES_CFG, val);
3924                         }
3925
3926                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3927                 }
3928                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3929                         tg3_setup_flow_control(tp, 0, 0);
3930                         current_link_up = 1;
3931                 }
3932                 goto out;
3933         }
3934
3935         /* Want auto-negotiation.  */
3936         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3937
3938         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3939         if (flowctrl & ADVERTISE_1000XPAUSE)
3940                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3941         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3942                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3943
3944         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3945                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3946                     tp->serdes_counter &&
3947                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3948                                     MAC_STATUS_RCVD_CFG)) ==
3949                      MAC_STATUS_PCS_SYNCED)) {
3950                         tp->serdes_counter--;
3951                         current_link_up = 1;
3952                         goto out;
3953                 }
3954 restart_autoneg:
3955                 if (workaround)
3956                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3957                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3958                 udelay(5);
3959                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3960
3961                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3962                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3963         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3964                                  MAC_STATUS_SIGNAL_DET)) {
3965                 sg_dig_status = tr32(SG_DIG_STATUS);
3966                 mac_status = tr32(MAC_STATUS);
3967
3968                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3969                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3970                         u32 local_adv = 0, remote_adv = 0;
3971
3972                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3973                                 local_adv |= ADVERTISE_1000XPAUSE;
3974                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3975                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3976
3977                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3978                                 remote_adv |= LPA_1000XPAUSE;
3979                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3980                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3981
3982                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3983                         current_link_up = 1;
3984                         tp->serdes_counter = 0;
3985                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3986                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3987                         if (tp->serdes_counter)
3988                                 tp->serdes_counter--;
3989                         else {
3990                                 if (workaround) {
3991                                         u32 val = serdes_cfg;
3992
3993                                         if (port_a)
3994                                                 val |= 0xc010000;
3995                                         else
3996                                                 val |= 0x4010000;
3997
3998                                         tw32_f(MAC_SERDES_CFG, val);
3999                                 }
4000
4001                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4002                                 udelay(40);
4003
4004                                 /* Link parallel detection - link is up */
4005                                 /* only if we have PCS_SYNC and not */
4006                                 /* receiving config code words */
4007                                 mac_status = tr32(MAC_STATUS);
4008                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4009                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4010                                         tg3_setup_flow_control(tp, 0, 0);
4011                                         current_link_up = 1;
4012                                         tp->phy_flags |=
4013                                                 TG3_PHYFLG_PARALLEL_DETECT;
4014                                         tp->serdes_counter =
4015                                                 SERDES_PARALLEL_DET_TIMEOUT;
4016                                 } else
4017                                         goto restart_autoneg;
4018                         }
4019                 }
4020         } else {
4021                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4022                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4023         }
4024
4025 out:
4026         return current_link_up;
4027 }
4028
/* Bring up a fiber (TBI) link without the hardware autoneg engine.
 * Runs the software 1000BASE-X autoneg state machine (fiber_autoneg)
 * when autoneg is enabled, otherwise forces a 1000FD configuration.
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means there is no usable signal at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the tx/rx config code words into
			 * MII-style pause advertisement bits for the
			 * flow control resolution below.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config change events until the hardware stops
		 * reasserting them (bounded at 30 iterations) so stale
		 * events do not immediately retrigger link handling.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg did not complete but we have PCS sync and the
		 * partner is not sending config words: treat the link as
		 * up via parallel detection.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Autoneg disabled: configure flow control with no
		 * advertised pause bits.
		 */
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Pulse SEND_CONFIGS, then restore normal MAC mode. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4090
/* Establish and monitor the link on TBI (fiber) devices.  Dispatches
 * to hardware or software autoneg, reconciles MAC state with the
 * resulting link state, drives the link LEDs, and reports carrier
 * transitions to the stack.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot the current link parameters so only genuine changes
	 * are reported at the end.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg device with an established, clean
	 * link (sync + signal detect, no pending config/change events)
	 * needs nothing beyond acking the change bits.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear any latched link-change indication in the host status
	 * block while preserving the other status bits.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack change events until the hardware stops asserting them
	 * (bounded at 100 iterations).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Autoneg timer has expired without sync; briefly
			 * send config words to prod the link partner.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* TBI links run only at 1000/full. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier transitions to the stack; also log when
	 * the link stayed up/down but its parameters changed.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
4198
/* Link setup for serdes devices controlled through an MII register
 * interface (e.g. 5714-class parts).  Programs advertisement/BMCR as
 * needed, resolves speed/duplex and flow control from the PHY
 * registers, and reports carrier changes.  Returns the OR of the
 * PHY-access error codes (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC events before touching the PHY. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice: its link status bit is latched-low per the
	 * MII convention, so the second read reflects current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 take link status from the MAC instead of BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from the current
		 * link configuration, preserving unrelated bits.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Restart autoneg only if the advertisement changed or
		 * autoneg was not already enabled; then return and let
		 * the timer-driven path pick up the result.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build the desired BMCR (autoneg off,
		 * duplex from link_config).
		 */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Drop all mode advertisements and restart
				 * autoneg so the partner sees the link go
				 * down before we force the new mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read latched BMSR after reprogramming. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the ability bits both ends
			 * advertised.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
4370
/* Periodic helper for MII-serdes devices: when autoneg has timed out
 * but we have signal and no incoming config code words, force the
 * link up by parallel detection; conversely, when a parallel-detected
 * link starts receiving config words again, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice; the first read clears latched state. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Force 1000/full and disable autoneg. */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
4430
/* Top-level link (re)configuration entry point.  Dispatches to the
 * PHY-type-specific setup routine, then applies link-state-dependent
 * chip programming: 5784-AX clock prescaler, TX slot time/IPG, stats
 * coalescing ticks, and the ASPM L1 threshold workaround.  Returns
 * the error code from the PHY-specific routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Pick the GRC timer prescaler to match the current MAC
		 * clock frequency reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		/* 5720 keeps its jumbo frame length and count-down
		 * fields across this rewrite.
		 */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit uses an extended slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only collect statistics while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Adjust the PCIe L1 entry threshold based on link state. */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
4495
/* Report the current irq_sync state.  NOTE(review): nonzero appears
 * to mean an interrupt quiesce is in progress so pollers should back
 * off — confirm against the irq_sync producers elsewhere in this file.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4500
4501 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4502 {
4503         int i;
4504
4505         dst = (u32 *)((u8 *)dst + off);
4506         for (i = 0; i < len; i += sizeof(u32))
4507                 *dst++ = tr32(off + i);
4508 }
4509
/* Fill "regs" with a snapshot of the legacy (non-PCIe) register
 * blocks for the debug dump.  Blocks tied to optional features
 * (MSI-X vectors, TX CPU, NVRAM) are read only when present.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* 5705+ parts have no separate TX CPU block. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
4559
/* Log a snapshot of chip registers plus the per-vector host status
 * block and NAPI state for debugging.  Allocates the register buffer
 * with GFP_ATOMIC and tolerates allocation failure by dumping only
 * an error message.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print the buffer four words per line, skipping all-zero rows
	 * to keep the dump compact.
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
4617
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Sanity check: this path is only meaningful when the reorder
	 * workaround is not already active and direct (not indirect)
	 * mailbox writes are in use.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under tp->lock; the actual chip
	 * reset happens later.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
4639
4640 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4641 {
4642         /* Tell compiler to fetch tx indices from memory. */
4643         barrier();
4644         return tnapi->tx_pending -
4645                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4646 }
4647
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware consumer index from the status block; everything
	 * between sw_idx and hw_idx has completed transmission.
	 */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	/* With TSS, TX vectors start at napi[1], so shift the queue
	 * index down by one.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot without an skb means the hardware
		 * reported a completion we never posted: suspect MMIO
		 * reordering and bail into recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear part of the packet. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each fragment; each occupies its own ring slot. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* An occupied frag slot, or running past hw_idx
			 * mid-packet, is another reordering symptom.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing with the xmit
	 * path before waking the queue.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
4722
4723 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4724 {
4725         if (!ri->skb)
4726                 return;
4727
4728         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4729                          map_sz, PCI_DMA_FROMDEVICE);
4730         dev_kfree_skb_any(ri->skb);
4731         ri->skb = NULL;
4732 }
4733
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Select standard or jumbo ring; anything else is a caller bug. */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	/* Align the buffer start per the chip's RX offset requirement. */
	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Mapping succeeded — now it is safe to commit the new buffer. */
	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	/* Chip only needs the 64-bit DMA address in the descriptor. */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
4800
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 *
 * Moves an already-mapped RX buffer from the source (napi[0])
 * producer ring slot src_idx to the destination ring slot derived
 * from dest_idx_unmasked, so the buffer can be reposted without a
 * fresh allocation.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Buffers always originate from the napi[0] producer ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Transfer skb ownership and DMA mapping to the new slot. */
	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	/* Clearing src_map->skb marks the source slot free; readers in
	 * tg3_rx_prodring_xfer() depend on the smp_wmb() above.
	 */
	src_map->skb = NULL;
}
4850
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	/* Work on local copies of the producer indices; they are only
	 * written back (masked) when the mailboxes are updated below.
	 */
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the originating producer
		 * ring and the slot index the buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames, except the harmless odd-nibble
		 * MII indication.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		/* Hardware length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Large frame: replace the ring buffer with a fresh
			 * skb and hand the original one up the stack.
			 */
			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy into a new skb and recycle the
			 * original ring buffer (cheaper than remapping).
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the full TCP/UDP
		 * checksum field reads back as 0xffff.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless VLAN-tagged (the tag
		 * accounts for the extra length).
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically flush the standard producer index so the
		 * chip does not run out of buffers mid-loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Single-queue: write producer mailboxes directly. */
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* RSS: napi[1] gathers recycled buffers from all vectors
		 * (see tg3_poll_work); kick it unless we are napi[1].
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5057
/* Check the status block for a link-change event and service it.
 * No-op when link changes are detected via register polling
 * (USE_LINKCHG_REG) or serdes polling (POLL_SERDES) instead.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the event in the status block while keeping
			 * SD_STATUS_UPDATED set.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns the PHY; just clear the MAC
				 * attention bits and let it do the rest.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
5081
/* Transfer recycled RX buffers from a per-vector source producer ring
 * set (spr) back to the destination set (dpr, normally napi[0]'s), for
 * both the standard and jumbo rings.  Runs lock-free against
 * tg3_recycle_rx(); the smp_rmb()s here pair with the smp_wmb() there.
 *
 * Returns 0 on success or -ENOSPC if a destination slot was still
 * occupied, in which case the caller nudges the chip to retry later.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Standard ring transfer loop. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy only the contiguous run up to the ring wrap point;
		 * the outer loop handles the remainder.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Also clamp to the contiguous free space at dest. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Shrink the copy if any destination slot is still busy. */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		/* Move the skb/mapping bookkeeping in one bulk copy... */
		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* ...then mirror the DMA addresses into the hardware BDs. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Jumbo ring transfer loop — same structure as above. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5207
/* Core per-vector poll step: reclaim TX completions, process RX up to
 * the remaining NAPI budget, and — on the RSS return vector (napi[1]) —
 * gather recycled RX buffers from all vectors back into napi[0]'s
 * producer rings.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* tg3_tx() found a ring inconsistency; a reset is
		 * pending, so do no further work.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		/* Snapshot the indices so we only touch the mailboxes
		 * that actually changed.
		 */
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Descriptor updates must reach memory before the chip
		 * is told about the new producer indices.
		 */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer ran out of space; force a coalescing-now
		 * interrupt so the leftover buffers get retried.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5254
/* NAPI poll handler for MSI-X vectors 1..n (tagged status only; link
 * events are handled by vector 0's tg3_poll).  Loops until the budget
 * is exhausted or no RX/TX work remains, then re-enables the vector's
 * interrupt via its mailbox.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5298
/* Examine the chip's error status registers after the status block
 * reported SD_STATUS_ERROR.  If a real error (flow attention, MSI
 * status, or DMA engine status) is found, dump state and schedule a
 * chip reset.  The ERROR_PROCESSED flag makes this one-shot until the
 * reset clears it.
 */
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	/* Already handled; the pending reset will clear the flag. */
	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	/* An MBUF low-watermark attention alone is not fatal. */
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	/* Any nonzero read/write DMA status indicates a DMA fault. */
	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	schedule_work(&tp->reset_task);
}
5332
/* NAPI poll handler for vector 0 (and the only vector in non-MSI-X
 * modes).  In addition to RX/TX work it services chip error events
 * and link changes, and supports both tagged and non-tagged status
 * block modes.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			/* Non-tagged mode: ack the status block update. */
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5380
5381 static void tg3_napi_disable(struct tg3 *tp)
5382 {
5383         int i;
5384
5385         for (i = tp->irq_cnt - 1; i >= 0; i--)
5386                 napi_disable(&tp->napi[i].napi);
5387 }
5388
5389 static void tg3_napi_enable(struct tg3 *tp)
5390 {
5391         int i;
5392
5393         for (i = 0; i < tp->irq_cnt; i++)
5394                 napi_enable(&tp->napi[i].napi);
5395 }
5396
5397 static void tg3_napi_init(struct tg3 *tp)
5398 {
5399         int i;
5400
5401         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5402         for (i = 1; i < tp->irq_cnt; i++)
5403                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5404 }
5405
5406 static void tg3_napi_fini(struct tg3 *tp)
5407 {
5408         int i;
5409
5410         for (i = 0; i < tp->irq_cnt; i++)
5411                 netif_napi_del(&tp->napi[i].napi);
5412 }
5413
/* Stop all RX/TX processing: silence NAPI polling first, then disable
 * the TX queues.  trans_start is refreshed so the watchdog does not
 * fire a spurious TX timeout while the device is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
5420
/* Restart RX/TX processing after tg3_netif_stop(): wake the TX queues,
 * re-enable NAPI, then force a status-block update and re-enable chip
 * interrupts so pending events are not missed.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so the first poll sees work. */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
5433
/* Set irq_sync and wait for all in-flight interrupt handlers to
 * finish.  The handlers check irq_sync (via tg3_irq_sync()) and
 * refuse to schedule NAPI once it is set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	/* Nested quiesce is a driver bug. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting on the handlers. */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
5446
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
5458
/* Release the lock taken by tg3_full_lock().  Note it does not undo
 * irq_sync; tg3_irq_sync() state is cleared elsewhere.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
5463
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the status block (and the next RX return BD, if this
	 * vector has an RX ring) before the NAPI poll runs.
	 */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling while tg3_irq_quiesce() is draining IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
5481
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Don't schedule NAPI while tg3_irq_quiesce() is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);	/* always handled: MSI is never shared */
}
5507
/* Legacy INTx interrupt handler (non-tagged status mode).  The line may
 * be shared, so we must determine whether the interrupt is ours before
 * acking it.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
5556
/* Legacy INTx interrupt handler for tagged-status mode.  An unchanged
 * status_tag means no new events have been posted since the interrupt
 * we last handled, so the interrupt is not ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
5608
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * INTA# line is asserted; simply disable interrupts so the
	 * test code can observe that the interrupt fired.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
5623
5624 static int tg3_init_hw(struct tg3 *, int);
5625 static int tg3_halt(struct tg3 *, int, int);
5626
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device is halted and closed; the teardown requires
 * dropping and re-taking tp->lock, hence the sparse annotations.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* del_timer_sync()/dev_close() cannot run under the
		 * lock, so release it around the teardown.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;	/* undo any pending IRQ quiesce */
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
5650
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke every vector's interrupt handler directly, for
 * contexts where normal interrupt delivery is unavailable (netconsole
 * and similar).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
5661
/* Process-context worker (scheduled e.g. from tg3_tx_timeout()) that
 * performs a full chip halt and reinitialization.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* The device went down after this work was queued: nothing to do. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* PHY stop runs outside the lock. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	/* Re-take the lock with IRQ quiescing for the actual reset. */
	tg3_full_lock(tp, 1);

	restart_timer = tg3_flag(tp, RESTART_TIMER);
	tg3_flag_clear(tp, RESTART_TIMER);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* A TX recovery was requested: switch to the flushing
		 * mailbox write methods (MBOX_WRITE_REORDER workaround)
		 * before bringing the chip back up.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
5709
5710 static void tg3_tx_timeout(struct net_device *dev)
5711 {
5712         struct tg3 *tp = netdev_priv(dev);
5713
5714         if (netif_msg_tx_err(tp)) {
5715                 netdev_err(dev, "transmit timed out, resetting\n");
5716                 tg3_dump_state(tp);
5717         }
5718
5719         schedule_work(&tp->reset_task);
5720 }
5721
5722 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5723 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5724 {
5725         u32 base = (u32) mapping & 0xffffffff;
5726
5727         return (base > 0xffffdcc0) && (base + len + 8 < base);
5728 }
5729
5730 /* Test for DMA addresses > 40-bit */
5731 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5732                                           int len)
5733 {
5734 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5735         if (tg3_flag(tp, 40BIT_DMA_BUG))
5736                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5737         return 0;
5738 #else
5739         return 0;
5740 #endif
5741 }
5742
5743 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5744                         dma_addr_t mapping, int len, u32 flags,
5745                         u32 mss_and_is_end)
5746 {
5747         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5748         int is_end = (mss_and_is_end & 0x1);
5749         u32 mss = (mss_and_is_end >> 1);
5750         u32 vlan_tag = 0;
5751
5752         if (is_end)
5753                 flags |= TXD_FLAG_END;
5754         if (flags & TXD_FLAG_VLAN) {
5755                 vlan_tag = flags >> 16;
5756                 flags &= 0xffff;
5757         }
5758         vlan_tag |= (mss << TXD_MSS_SHIFT);
5759
5760         txd->addr_hi = ((u64) mapping >> 32);
5761         txd->addr_lo = ((u64) mapping & 0xffffffff);
5762         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5763         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5764 }
5765
/* Unmap the DMA mappings of a partially-queued frame: the linear head
 * mapped at ring index tnapi->tx_prod, plus fragments 0..@last on the
 * entries that follow.  Ring bookkeeping (skb pointers) is left to
 * the caller.
 */
static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
				struct sk_buff *skb, int last)
{
	int i;
	u32 entry = tnapi->tx_prod;
	struct ring_info *txb = &tnapi->tx_buffers[entry];

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	/* @last == -1 (head-only frame) skips this loop entirely. */
	for (i = 0; i <= last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}
}
5788
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copy @skb into a new, linear skb that can be described by a single
 * TX BD, map it and write that BD at @tnapi's current tx_prod.  The
 * original skb is always consumed.  Returns 0 on success or -1 if the
 * copy/mapping failed (caller then drops the packet).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff *skb,
				       u32 base_flags, u32 mss)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = tnapi->tx_prod;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701: give the copy extra headroom so the data can be
		 * 4-byte aligned (note more_headroom is 4, not 0, when
		 * the data is already aligned).
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			ret = -1;
			dev_kfree_skb(new_skb);

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		} else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
			   tg3_4g_overflow_test(new_addr, new_skb->len)) {
			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
					 PCI_DMA_TODEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
		} else {
			tnapi->tx_buffers[entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry],
					   mapping, new_addr);

			/* Single descriptor: mark it as the frame end. */
			tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
		}
	}

	/* The original skb is consumed on every path. */
	dev_kfree_skb(skb);

	return ret;
}
5844
5845 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5846
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * The skb is always consumed; the return value is NETDEV_TX_OK unless
 * the ring lacks room for the worst-case number of segments.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software (TSO masked off), then push each
	 * MSS-sized skb through the normal transmit path.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
5887
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 *
 * Maps the skb (linear head plus page fragments), writes one TX BD per
 * piece, then publishes the new producer index to the chip via the TX
 * mailbox.  If any mapping trips a DMA erratum, the frame is re-sent
 * through tigon3_dma_hwbug_workaround() as a single linearized copy.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With TSS the TX rings are offset by one napi context. */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		/* TSO path modifies the headers below, so take a
		 * private copy if they are cloned.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		if (skb_is_gso_v6(skb)) {
			hdr_len = skb_headlen(skb) - ETH_HLEN;
		} else {
			u32 ip_tcp_len;

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
			hdr_len = ip_tcp_len + tcp_opt_len;

			/* Seed tot_len with the per-segment length and
			 * clear the checksum for the hardware to fill.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Headers over 80 bytes trip a TSO bug on some chips;
		 * fall back to software GSO segmentation there.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the header length into mss/base_flags in the
		 * layout each hardware TSO generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* Check the linear part against every DMA erratum this chip
	 * suffers from; if any trips, the frame goes through the
	 * linearizing workaround below.
	 */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		would_hit_hwbug = 1;

	if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
	    tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
	    tg3_40bit_overflow_test(tp, mapping, len))
		would_hit_hwbug = 1;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;

			if (tg3_flag(tp, SHORT_DMA_BUG) &&
			    len <= 8)
				would_hit_hwbug = 1;

			if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
			    tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
			    tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tg3_flag(tp, HW_TSO_1) ||
			    tg3_flag(tp, HW_TSO_2) ||
			    tg3_flag(tp, HW_TSO_3))
				tg3_set_txd(tnapi, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tnapi, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		/* Undo all mappings and retransmit the frame as one
		 * linearized copy.
		 */
		tg3_skb_error_unmap(tnapi, skb, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
			goto out_unlock;

		entry = NEXT_TX(tnapi->tx_prod);
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;

dma_error:
	/* NOTE(review): @i is the fragment whose mapping just failed;
	 * tg3_skb_error_unmap() unmaps fragments 0..i inclusive, which
	 * appears to unmap the failed mapping as well - confirm.
	 */
	tg3_skb_error_unmap(tnapi, skb, i);
	dev_kfree_skb(skb);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
	return NETDEV_TX_OK;
}
6119
/* Toggle the MAC's internal loopback mode to match NETIF_F_LOOPBACK.
 * Called from tg3_set_features() while the interface is running.
 */
static void tg3_set_loopback(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		/* Already in loopback: nothing to do. */
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		/*
		 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
		 * loopback mode if Half-Duplex mode was negotiated earlier.
		 */
		tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;

		/* Enable internal MAC loopback mode */
		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		spin_lock_bh(&tp->lock);
		tw32(MAC_MODE, tp->mac_mode);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		/* Disable internal MAC loopback mode */
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
		spin_lock_bh(&tp->lock);
		tw32(MAC_MODE, tp->mac_mode);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
6155
6156 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6157 {
6158         struct tg3 *tp = netdev_priv(dev);
6159
6160         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6161                 features &= ~NETIF_F_ALL_TSO;
6162
6163         return features;
6164 }
6165
6166 static int tg3_set_features(struct net_device *dev, u32 features)
6167 {
6168         u32 changed = dev->features ^ features;
6169
6170         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6171                 tg3_set_loopback(dev, features);
6172
6173         return 0;
6174 }
6175
/* Record the new MTU and adjust the jumbo-ring/TSO configuration that
 * depends on it.  The caller handles stopping/restarting the chip.
 */
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			/* 5780-class: jumbo MTU precludes TSO.  Update
			 * features (tg3_fix_features() drops the TSO
			 * bits now that dev->mtu is large) before
			 * clearing the capability flag.
			 */
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			/* Reverse order on the way back: restore the
			 * capability first, then recompute features.
			 */
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
6196
/* ndo_change_mtu handler: validate the requested MTU and, if the
 * device is running, restart the hardware with the new ring setup.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	/* Quiesce IRQs too (second arg) before halting the chip. */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
6235
/* Release every rx buffer still owned by @tpr.
 *
 * For the default (napi[0]) producer ring all entries are freed; for
 * the other per-vector rings only the window between the consumer and
 * producer indices is walked.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	/* 5780-class chips carry jumbo frames on the std ring (see
	 * tg3_rx_prodring_alloc()), so skip the separate jumbo ring
	 * for them.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
6269
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	/* Both rings start out empty: producer == consumer. */
	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Only the first vector's ring set carries real descriptors
	 * and fresh buffers; the other per-vector sets just need
	 * their host-side bookkeeping arrays cleared.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips have no separate jumbo ring; with a jumbo
	 * MTU they post jumbo-sized buffers on the standard ring.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			/* Total failure is fatal; a partial allocation
			 * just shrinks the ring to what we got.
			 */
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup as above, but for the jumbo ring. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* As above, shrink the jumbo ring on a partial allocation
	 * failure rather than failing outright.
	 */
	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	/* Unwind whatever buffers were posted before the failure. */
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
6372
6373 static void tg3_rx_prodring_fini(struct tg3 *tp,
6374                                  struct tg3_rx_prodring_set *tpr)
6375 {
6376         kfree(tpr->rx_std_buffers);
6377         tpr->rx_std_buffers = NULL;
6378         kfree(tpr->rx_jmb_buffers);
6379         tpr->rx_jmb_buffers = NULL;
6380         if (tpr->rx_std) {
6381                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6382                                   tpr->rx_std, tpr->rx_std_mapping);
6383                 tpr->rx_std = NULL;
6384         }
6385         if (tpr->rx_jmb) {
6386                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6387                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6388                 tpr->rx_jmb = NULL;
6389         }
6390 }
6391
6392 static int tg3_rx_prodring_init(struct tg3 *tp,
6393                                 struct tg3_rx_prodring_set *tpr)
6394 {
6395         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6396                                       GFP_KERNEL);
6397         if (!tpr->rx_std_buffers)
6398                 return -ENOMEM;
6399
6400         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6401                                          TG3_RX_STD_RING_BYTES(tp),
6402                                          &tpr->rx_std_mapping,
6403                                          GFP_KERNEL);
6404         if (!tpr->rx_std)
6405                 goto err_out;
6406
6407         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6408                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6409                                               GFP_KERNEL);
6410                 if (!tpr->rx_jmb_buffers)
6411                         goto err_out;
6412
6413                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6414                                                  TG3_RX_JMB_RING_BYTES(tp),
6415                                                  &tpr->rx_jmb_mapping,
6416                                                  GFP_KERNEL);
6417                 if (!tpr->rx_jmb)
6418                         goto err_out;
6419         }
6420
6421         return 0;
6422
6423 err_out:
6424         tg3_rx_prodring_fini(tp, tpr);
6425         return -ENOMEM;
6426 }
6427
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		/* Vectors without a tx ring have no tx bookkeeping. */
		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; ) {
			struct ring_info *txp;
			struct sk_buff *skb;
			unsigned int k;

			txp = &tnapi->tx_buffers[i];
			skb = txp->skb;

			/* Empty slot; nothing is mapped here. */
			if (skb == NULL) {
				i++;
				continue;
			}

			/* First descriptor of a packet maps the skb
			 * linear (head) area.
			 */
			pci_unmap_single(tp->pdev,
					 dma_unmap_addr(txp, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);
			txp->skb = NULL;

			i++;

			/* Each page fragment occupies one further
			 * descriptor; the index may wrap past the end
			 * of the ring, hence the ring-size mask.
			 */
			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
				pci_unmap_page(tp->pdev,
					       dma_unmap_addr(txp, mapping),
					       skb_shinfo(skb)->frags[k].size,
					       PCI_DMA_TODEVICE);
				i++;
			}

			/* Safe from any context, per the _any variant. */
			dev_kfree_skb_any(skb);
		}
	}
}
6481
6482 /* Initialize tx/rx rings for packet processing.
6483  *
6484  * The chip has been shut down and the driver detached from
6485  * the networking, so no interrupts or new tx packets will
6486  * end up in the driver.  tp->{tx,}lock are held and thus
6487  * we may not sleep.
6488  */
6489 static int tg3_init_rings(struct tg3 *tp)
6490 {
6491         int i;
6492
6493         /* Free up all the SKBs. */
6494         tg3_free_rings(tp);
6495
6496         for (i = 0; i < tp->irq_cnt; i++) {
6497                 struct tg3_napi *tnapi = &tp->napi[i];
6498
6499                 tnapi->last_tag = 0;
6500                 tnapi->last_irq_tag = 0;
6501                 tnapi->hw_status->status = 0;
6502                 tnapi->hw_status->status_tag = 0;
6503                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6504
6505                 tnapi->tx_prod = 0;
6506                 tnapi->tx_cons = 0;
6507                 if (tnapi->tx_ring)
6508                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6509
6510                 tnapi->rx_rcb_ptr = 0;
6511                 if (tnapi->rx_rcb)
6512                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6513
6514                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6515                         tg3_free_rings(tp);
6516                         return -ENOMEM;
6517                 }
6518         }
6519
6520         return 0;
6521 }
6522
6523 /*
6524  * Must not be invoked with interrupt sources disabled and
6525  * the hardware shutdown down.
6526  */
6527 static void tg3_free_consistent(struct tg3 *tp)
6528 {
6529         int i;
6530
6531         for (i = 0; i < tp->irq_cnt; i++) {
6532                 struct tg3_napi *tnapi = &tp->napi[i];
6533
6534                 if (tnapi->tx_ring) {
6535                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6536                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6537                         tnapi->tx_ring = NULL;
6538                 }
6539
6540                 kfree(tnapi->tx_buffers);
6541                 tnapi->tx_buffers = NULL;
6542
6543                 if (tnapi->rx_rcb) {
6544                         dma_free_coherent(&tp->pdev->dev,
6545                                           TG3_RX_RCB_RING_BYTES(tp),
6546                                           tnapi->rx_rcb,
6547                                           tnapi->rx_rcb_mapping);
6548                         tnapi->rx_rcb = NULL;
6549                 }
6550
6551                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6552
6553                 if (tnapi->hw_status) {
6554                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6555                                           tnapi->hw_status,
6556                                           tnapi->status_mapping);
6557                         tnapi->hw_status = NULL;
6558                 }
6559         }
6560
6561         if (tp->hw_stats) {
6562                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6563                                   tp->hw_stats, tp->stats_mapping);
6564                 tp->hw_stats = NULL;
6565         }
6566 }
6567
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	/* One shared DMA block for the hardware statistics. */
	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		/* Every interrupt vector gets its own status block. */
		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
						    TG3_TX_RING_SIZE,
						    GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							&tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	/* Unwind every allocation made so far. */
	tg3_free_consistent(tp);
	return -ENOMEM;
}
6665
6666 #define MAX_WAIT_CNT 1000
6667
6668 /* To stop a block, clear the enable bit and poll till it
6669  * clears.  tp->lock is held.
6670  */
6671 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6672 {
6673         unsigned int i;
6674         u32 val;
6675
6676         if (tg3_flag(tp, 5705_PLUS)) {
6677                 switch (ofs) {
6678                 case RCVLSC_MODE:
6679                 case DMAC_MODE:
6680                 case MBFREE_MODE:
6681                 case BUFMGR_MODE:
6682                 case MEMARB_MODE:
6683                         /* We can't enable/disable these bits of the
6684                          * 5705/5750, just say success.
6685                          */
6686                         return 0;
6687
6688                 default:
6689                         break;
6690                 }
6691         }
6692
6693         val = tr32(ofs);
6694         val &= ~enable_bit;
6695         tw32_f(ofs, val);
6696
6697         for (i = 0; i < MAX_WAIT_CNT; i++) {
6698                 udelay(100);
6699                 val = tr32(ofs);
6700                 if ((val & enable_bit) == 0)
6701                         break;
6702         }
6703
6704         if (i == MAX_WAIT_CNT && !silent) {
6705                 dev_err(&tp->pdev->dev,
6706                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6707                         ofs, enable_bit);
6708                 return -ENODEV;
6709         }
6710
6711         return 0;
6712 }
6713
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new frames from the wire first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Quiesce the receive-side state machines.  err accumulates
	 * any failure, but every stop is still attempted.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Then the transmit-side state machines. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll up to 100ms for the MAC transmitter to stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* With the hardware quiesced, wipe the per-vector status
	 * blocks and the statistics block.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
6779
/* Post a driver event to the APE management firmware, waiting briefly
 * for any previously posted event to be consumed first.  Best effort:
 * silently gives up if the APE is absent, not ready, or stays busy.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	/* Bail unless APE firmware is present and reports ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Post our event only once the previous one is gone. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if the event was actually posted. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
6819
/* Inform the APE management firmware of a driver state transition
 * (init / shutdown / suspend) and then send the matching APE event.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Advertise a live host driver to the APE: signature,
		 * init count, driver id and behavior flags.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Report WOL state so the APE keeps the link alive
		 * at a wake-capable speed when WOL is armed.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
6876
6877 /* tp->lock is held. */
6878 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6879 {
6880         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6881                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6882
6883         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6884                 switch (kind) {
6885                 case RESET_KIND_INIT:
6886                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6887                                       DRV_STATE_START);
6888                         break;
6889
6890                 case RESET_KIND_SHUTDOWN:
6891                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6892                                       DRV_STATE_UNLOAD);
6893                         break;
6894
6895                 case RESET_KIND_SUSPEND:
6896                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6897                                       DRV_STATE_SUSPEND);
6898                         break;
6899
6900                 default:
6901                         break;
6902                 }
6903         }
6904
6905         if (kind == RESET_KIND_INIT ||
6906             kind == RESET_KIND_SUSPEND)
6907                 tg3_ape_driver_state_change(tp, kind);
6908 }
6909
6910 /* tp->lock is held. */
6911 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6912 {
6913         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6914                 switch (kind) {
6915                 case RESET_KIND_INIT:
6916                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6917                                       DRV_STATE_START_DONE);
6918                         break;
6919
6920                 case RESET_KIND_SHUTDOWN:
6921                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6922                                       DRV_STATE_UNLOAD_DONE);
6923                         break;
6924
6925                 default:
6926                         break;
6927                 }
6928         }
6929
6930         if (kind == RESET_KIND_SHUTDOWN)
6931                 tg3_ape_driver_state_change(tp, kind);
6932 }
6933
6934 /* tp->lock is held. */
6935 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6936 {
6937         if (tg3_flag(tp, ENABLE_ASF)) {
6938                 switch (kind) {
6939                 case RESET_KIND_INIT:
6940                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6941                                       DRV_STATE_START);
6942                         break;
6943
6944                 case RESET_KIND_SHUTDOWN:
6945                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6946                                       DRV_STATE_UNLOAD);
6947                         break;
6948
6949                 case RESET_KIND_SUSPEND:
6950                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6951                                       DRV_STATE_SUSPEND);
6952                         break;
6953
6954                 default:
6955                         break;
6956                 }
6957         }
6958 }
6959
/* Poll until the chip's boot firmware signals that it has finished
 * initializing after a reset.  Returns 0 on success (or when no
 * firmware is fitted); -ENODEV only when a 5906 VCPU never comes up.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  The firmware
	 * acks by writing back the one's complement of the magic value
	 * posted in tg3_write_sig_pre_reset().  Up to 1s (100000*10us).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
7003
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core-clock reset can clear bits in PCI_COMMAND
	 * (e.g. memory enable on some chips); stash the register so
	 * tg3_restore_pci_state() can put it back afterwards.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7009
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command register saved in tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			/* Conventional PCI: restore cache line size and
			 * latency timer lost across the reset.
			 */
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
7074
7075 static void tg3_stop_fw(struct tg3 *);
7076
/* tp->lock is held.
 *
 * Issue a GRC core-clock reset and bring the chip back to an operational
 * state: save PCI config state around the reset, apply the various
 * chip-revision-specific workaround sequences, restore PCI state, and
 * finally reprobe the ASF firmware enable state from NVRAM-shadowed SRAM.
 *
 * Returns 0 on success, or the negative errno from tg3_poll_fw() if the
 * on-chip bootcode fails to signal completion after the reset.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	/* Clear the fastboot program counter so bootcode executes from ROM. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Wait for any in-flight interrupt handlers to finish so none
	 * touches the device while PCI memory access is disabled.
	 */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			/* NOTE(review): bit 29 is an undocumented GRC_MISC_CFG
			 * bit inherited from vendor code; it is set both
			 * directly and in the reset value written below.
			 */
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: flag a driver-initiated reset to the VCPU and
		 * make sure the VCPU is not held in halt across the reset.
		 */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	/* This write actually triggers the core-clock reset. */
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			/* NOTE(review): config offset 0xc4 / bit 15 is an
			 * undocumented 5750 A0 PCIe workaround from vendor
			 * code; exact semantics not derivable here.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVCTL,
				      val16);

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter, preserving its mode bits on
	 * 5780-class chips.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		/* NOTE(review): register 0x5000 write is an undocumented
		 * 5750 A3 workaround from vendor code.
		 */
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		/* NOTE(review): register 0xc4 bit 15 — undocumented
		 * 5705 A0 workaround.
		 */
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reinitialize the MAC mode; when the APE owns the MAC, keep its
	 * TX/RX paths enabled across the reset.
	 */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN |
			       MAC_MODE_APE_RX_EN |
			       MAC_MODE_TDE_ENABLE;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	/* Wait for the bootcode to finish coming back up. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		/* NOTE(review): register 0x7c00 bit 25 — undocumented PCIe
		 * workaround from vendor code.
		 */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
7327
/* tp->lock is held.
 *
 * Quiesce the ASF management firmware before a reset.  Only applies when
 * ASF is enabled and not routed through the APE: wait for the RX CPU to
 * ack any previously posted event, post a FWCMD_NICDRV_PAUSE_FW command
 * in the firmware mailbox, raise the event, then wait for that event to
 * be acked so the firmware is idle when the caller proceeds.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
7343
/* tp->lock is held.
 *
 * Halt the device: pause the management firmware, write the pre-reset
 * signature, abort in-flight hardware activity, chip-reset, then restore
 * the primary MAC address and write the legacy/post-reset signatures.
 *
 * @kind:   RESET_KIND_* value recorded in the reset signatures.
 * @silent: passed through to tg3_abort_hw() to suppress timeout warnings.
 *
 * Returns the tg3_chip_reset() result (0 on success, negative errno on
 * bootcode poll failure).  The MAC address restore and signature writes
 * happen even when the reset failed, preserving the original ordering.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	/* The original "if (err) return err; return 0;" tail was
	 * equivalent to returning err directly.
	 */
	return err;
}
7366
7367 #define RX_CPU_SCRATCH_BASE     0x30000
7368 #define RX_CPU_SCRATCH_SIZE     0x04000
7369 #define TX_CPU_SCRATCH_BASE     0x34000
7370 #define TX_CPU_SCRATCH_SIZE     0x04000
7371
/* tp->lock is held.
 *
 * Halt one of the on-chip RISC CPUs (RX or TX) identified by its
 * register block @offset.  On 5906 the MIPS CPUs are replaced by a VCPU,
 * which is halted via GRC_VCPU_EXT_CTRL instead.  5705+ chips have no
 * TX CPU, hence the BUG_ON.
 *
 * Returns 0 on success, -ENODEV if the CPU does not report halted after
 * 10000 attempts.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* RX CPU only: issue one final flushed halt and settle. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
7416
/* Description of a firmware image destined for an on-chip CPU.
 * fw_base is the load/start address as seen by that CPU, fw_len the
 * payload length in bytes, and fw_data the big-endian payload words.
 */
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};
7422
/* tp->lock is held.
 *
 * Halt @cpu_base's CPU and copy the firmware described by @info into its
 * scratch memory: the whole scratch area is first zeroed, then the image
 * words are written at (scratch base + low 16 bits of fw_base).  The CPU
 * is left halted; the caller is responsible for setting CPU_PC and
 * releasing the halt.
 *
 * Returns 0 on success, -EINVAL for a TX CPU load on 5705+ (no TX CPU),
 * or the tg3_halt_cpu() error.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ scratch memory is reached through direct memory writes;
	 * older chips need the indirect register method.
	 */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the scratch area, keep the CPU halted, then copy the image. */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
7467
/* tp->lock is held.
 *
 * Load the 5701 A0 workaround firmware into both the RX and TX CPUs,
 * then start only the RX CPU by pointing CPU_PC at the image start and
 * clearing the halt.  Retries the PC write up to 5 times because the
 * first write may not stick while the CPU is being released.
 *
 * Returns 0 on success, a tg3_load_firmware_cpu() error, or -ENODEV if
 * the RX CPU PC cannot be set.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;	/* skip 3-word header */
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the RX CPU from halt so it starts executing the image. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
7522
/* tp->lock is held.
 *
 * Load the software-TSO firmware into the appropriate on-chip CPU and
 * start it.  Chips with hardware TSO (HW_TSO_1/2/3) need no firmware and
 * return immediately.  5705 loads into the RX CPU using part of the MBUF
 * pool as scratch; all other eligible chips load into the TX CPU scratch
 * area.  As in tg3_load_5701_a0_firmware_fix(), the CPU_PC write is
 * retried up to 5 times.
 *
 * Returns 0 on success, a tg3_load_firmware_cpu() error, or -ENODEV if
 * the CPU PC cannot be set.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;	/* skip 3-word header */
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt so it starts executing the image. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
7586
7587
7588 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7589 {
7590         struct tg3 *tp = netdev_priv(dev);
7591         struct sockaddr *addr = p;
7592         int err = 0, skip_mac_1 = 0;
7593
7594         if (!is_valid_ether_addr(addr->sa_data))
7595                 return -EINVAL;
7596
7597         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7598
7599         if (!netif_running(dev))
7600                 return 0;
7601
7602         if (tg3_flag(tp, ENABLE_ASF)) {
7603                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7604
7605                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7606                 addr0_low = tr32(MAC_ADDR_0_LOW);
7607                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7608                 addr1_low = tr32(MAC_ADDR_1_LOW);
7609
7610                 /* Skip MAC addr 1 if ASF is using it. */
7611                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7612                     !(addr1_high == 0 && addr1_low == 0))
7613                         skip_mac_1 = 1;
7614         }
7615         spin_lock_bh(&tp->lock);
7616         __tg3_set_mac_addr(tp, skip_mac_1);
7617         spin_unlock_bh(&tp->lock);
7618
7619         return err;
7620 }
7621
/* tp->lock is held.
 *
 * Program a ring control block (BDINFO) in NIC SRAM at @bdinfo_addr:
 * the 64-bit host DMA address of the ring (@mapping), its combined
 * length/flags word (@maxlen_flags), and — on pre-5705 chips only —
 * the NIC-local ring address (@nic_addr).
 */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		       maxlen_flags);

	/* 5705+ chips have no NIC-address field in the BDINFO block. */
	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
7642
7643 static void __tg3_set_rx_mode(struct net_device *);
/* Program the host coalescing engine from an ethtool_coalesce request.
 *
 * The default (vector 0) TX/RX coalescing registers are written only
 * when TSS/RSS respectively are disabled; with TSS/RSS active the
 * per-vector register banks (VEC1 + i * 0x18) carry the values instead,
 * and the defaults are zeroed.  Unused vector banks up to irq_max are
 * cleared.  Caller holds tp->lock.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		/* No stats updates while the link is down. */
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	/* Per-vector banks for the active extra interrupt vectors. */
	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	/* Zero the banks for vectors that exist but are not in use. */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
7712
/* tp->lock is held.
 *
 * Reset all send, receive-return, and status-block state: disable every
 * TX and RX-return ring control block beyond the first (the count of
 * RCBs present depends on chip family), zero the mailbox registers,
 * clear the host status blocks, and reprogram the BDINFO blocks and
 * status-block DMA addresses for each active napi vector.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			/* Writing 1 keeps the vector's interrupt masked. */
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Vector 0 ring control blocks. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	/* Remaining vectors: status block addresses plus ring RCBs. */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
7834
/* Program the RX buffer descriptor replenish thresholds.
 *
 * Picks the standard-ring BD cache size for the chip family, sets the
 * standard-ring threshold to the smaller of the NIC-side limit (half the
 * cache, capped at rx_std_max_post) and the host-side limit (1/8 of the
 * pending count, at least 1), then repeats the computation for the jumbo
 * ring on chips that support it.  57765+ chips additionally take a
 * replenish low-water mark.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* Jumbo ring thresholds do not apply without jumbo support,
	 * and 5780-class chips are excluded.
	 */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
	else
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
7875
7876 /* tp->lock is held. */
7877 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7878 {
7879         u32 val, rdmac_mode;
7880         int i, err, limit;
7881         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7882
7883         tg3_disable_ints(tp);
7884
7885         tg3_stop_fw(tp);
7886
7887         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7888
7889         if (tg3_flag(tp, INIT_COMPLETE))
7890                 tg3_abort_hw(tp, 1);
7891
7892         /* Enable MAC control of LPI */
7893         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7894                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7895                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7896                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
7897
7898                 tw32_f(TG3_CPMU_EEE_CTRL,
7899                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7900
7901                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7902                       TG3_CPMU_EEEMD_LPI_IN_TX |
7903                       TG3_CPMU_EEEMD_LPI_IN_RX |
7904                       TG3_CPMU_EEEMD_EEE_ENABLE;
7905
7906                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7907                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7908
7909                 if (tg3_flag(tp, ENABLE_APE))
7910                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7911
7912                 tw32_f(TG3_CPMU_EEE_MODE, val);
7913
7914                 tw32_f(TG3_CPMU_EEE_DBTMR1,
7915                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7916                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7917
7918                 tw32_f(TG3_CPMU_EEE_DBTMR2,
7919                        TG3_CPMU_DBTMR2_APE_TX_2047US |
7920                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7921         }
7922
7923         if (reset_phy)
7924                 tg3_phy_reset(tp);
7925
7926         err = tg3_chip_reset(tp);
7927         if (err)
7928                 return err;
7929
7930         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7931
7932         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7933                 val = tr32(TG3_CPMU_CTRL);
7934                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7935                 tw32(TG3_CPMU_CTRL, val);
7936
7937                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7938                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7939                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7940                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7941
7942                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7943                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7944                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7945                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7946
7947                 val = tr32(TG3_CPMU_HST_ACC);
7948                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7949                 val |= CPMU_HST_ACC_MACCLK_6_25;
7950                 tw32(TG3_CPMU_HST_ACC, val);
7951         }
7952
7953         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7954                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7955                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7956                        PCIE_PWR_MGMT_L1_THRESH_4MS;
7957                 tw32(PCIE_PWR_MGMT_THRESH, val);
7958
7959                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7960                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7961
7962                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7963
7964                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7965                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7966         }
7967
7968         if (tg3_flag(tp, L1PLLPD_EN)) {
7969                 u32 grc_mode = tr32(GRC_MODE);
7970
7971                 /* Access the lower 1K of PL PCIE block registers. */
7972                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7973                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7974
7975                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7976                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7977                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7978
7979                 tw32(GRC_MODE, grc_mode);
7980         }
7981
7982         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7983                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7984                         u32 grc_mode = tr32(GRC_MODE);
7985
7986                         /* Access the lower 1K of PL PCIE block registers. */
7987                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7988                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7989
7990                         val = tr32(TG3_PCIE_TLDLPL_PORT +
7991                                    TG3_PCIE_PL_LO_PHYCTL5);
7992                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7993                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7994
7995                         tw32(GRC_MODE, grc_mode);
7996                 }
7997
7998                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
7999                         u32 grc_mode = tr32(GRC_MODE);
8000
8001                         /* Access the lower 1K of DL PCIE block registers. */
8002                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8003                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8004
8005                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8006                                    TG3_PCIE_DL_LO_FTSMAX);
8007                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8008                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8009                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8010
8011                         tw32(GRC_MODE, grc_mode);
8012                 }
8013
8014                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8015                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8016                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8017                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8018         }
8019
8020         /* This works around an issue with Athlon chipsets on
8021          * B3 tigon3 silicon.  This bit has no effect on any
8022          * other revision.  But do not set this on PCI Express
8023          * chips and don't even touch the clocks if the CPMU is present.
8024          */
8025         if (!tg3_flag(tp, CPMU_PRESENT)) {
8026                 if (!tg3_flag(tp, PCI_EXPRESS))
8027                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8028                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8029         }
8030
8031         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8032             tg3_flag(tp, PCIX_MODE)) {
8033                 val = tr32(TG3PCI_PCISTATE);
8034                 val |= PCISTATE_RETRY_SAME_DMA;
8035                 tw32(TG3PCI_PCISTATE, val);
8036         }
8037
8038         if (tg3_flag(tp, ENABLE_APE)) {
8039                 /* Allow reads and writes to the
8040                  * APE register and memory space.
8041                  */
8042                 val = tr32(TG3PCI_PCISTATE);
8043                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8044                        PCISTATE_ALLOW_APE_SHMEM_WR |
8045                        PCISTATE_ALLOW_APE_PSPACE_WR;
8046                 tw32(TG3PCI_PCISTATE, val);
8047         }
8048
8049         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8050                 /* Enable some hw fixes.  */
8051                 val = tr32(TG3PCI_MSI_DATA);
8052                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8053                 tw32(TG3PCI_MSI_DATA, val);
8054         }
8055
8056         /* Descriptor ring init may make accesses to the
8057          * NIC SRAM area to setup the TX descriptors, so we
8058          * can only do this after the hardware has been
8059          * successfully reset.
8060          */
8061         err = tg3_init_rings(tp);
8062         if (err)
8063                 return err;
8064
8065         if (tg3_flag(tp, 57765_PLUS)) {
8066                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8067                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8068                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8069                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8070                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8071                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8072                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8073                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8074         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8075                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8076                 /* This value is determined during the probe time DMA
8077                  * engine test, tg3_test_dma.
8078                  */
8079                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8080         }
8081
8082         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8083                           GRC_MODE_4X_NIC_SEND_RINGS |
8084                           GRC_MODE_NO_TX_PHDR_CSUM |
8085                           GRC_MODE_NO_RX_PHDR_CSUM);
8086         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8087
8088         /* Pseudo-header checksum is done by hardware logic and not
8089          * the offload processers, so make the chip do the pseudo-
8090          * header checksums on receive.  For transmit it is more
8091          * convenient to do the pseudo-header checksum in software
8092          * as Linux does that on transmit for us in all cases.
8093          */
8094         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8095
8096         tw32(GRC_MODE,
8097              tp->grc_mode |
8098              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8099
8100         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8101         val = tr32(GRC_MISC_CFG);
8102         val &= ~0xff;
8103         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8104         tw32(GRC_MISC_CFG, val);
8105
8106         /* Initialize MBUF/DESC pool. */
8107         if (tg3_flag(tp, 5750_PLUS)) {
8108                 /* Do nothing.  */
8109         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8110                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8111                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8112                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8113                 else
8114                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8115                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8116                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8117         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8118                 int fw_len;
8119
8120                 fw_len = tp->fw_len;
8121                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8122                 tw32(BUFMGR_MB_POOL_ADDR,
8123                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8124                 tw32(BUFMGR_MB_POOL_SIZE,
8125                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8126         }
8127
8128         if (tp->dev->mtu <= ETH_DATA_LEN) {
8129                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8130                      tp->bufmgr_config.mbuf_read_dma_low_water);
8131                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8132                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8133                 tw32(BUFMGR_MB_HIGH_WATER,
8134                      tp->bufmgr_config.mbuf_high_water);
8135         } else {
8136                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8137                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8138                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8139                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8140                 tw32(BUFMGR_MB_HIGH_WATER,
8141                      tp->bufmgr_config.mbuf_high_water_jumbo);
8142         }
8143         tw32(BUFMGR_DMA_LOW_WATER,
8144              tp->bufmgr_config.dma_low_water);
8145         tw32(BUFMGR_DMA_HIGH_WATER,
8146              tp->bufmgr_config.dma_high_water);
8147
8148         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8149         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8150                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8151         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8152             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8153             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8154                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8155         tw32(BUFMGR_MODE, val);
8156         for (i = 0; i < 2000; i++) {
8157                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8158                         break;
8159                 udelay(10);
8160         }
8161         if (i >= 2000) {
8162                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8163                 return -ENODEV;
8164         }
8165
8166         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8167                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8168
8169         tg3_setup_rxbd_thresholds(tp);
8170
8171         /* Initialize TG3_BDINFO's at:
8172          *  RCVDBDI_STD_BD:     standard eth size rx ring
8173          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8174          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8175          *
8176          * like so:
8177          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8178          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8179          *                              ring attribute flags
8180          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8181          *
8182          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8183          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8184          *
8185          * The size of each ring is fixed in the firmware, but the location is
8186          * configurable.
8187          */
8188         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8189              ((u64) tpr->rx_std_mapping >> 32));
8190         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8191              ((u64) tpr->rx_std_mapping & 0xffffffff));
8192         if (!tg3_flag(tp, 5717_PLUS))
8193                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8194                      NIC_SRAM_RX_BUFFER_DESC);
8195
8196         /* Disable the mini ring */
8197         if (!tg3_flag(tp, 5705_PLUS))
8198                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8199                      BDINFO_FLAGS_DISABLED);
8200
8201         /* Program the jumbo buffer descriptor ring control
8202          * blocks on those devices that have them.
8203          */
8204         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8205             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8206
8207                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8208                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8209                              ((u64) tpr->rx_jmb_mapping >> 32));
8210                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8211                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8212                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8213                               BDINFO_FLAGS_MAXLEN_SHIFT;
8214                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8215                              val | BDINFO_FLAGS_USE_EXT_RECV);
8216                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8217                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8218                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8219                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8220                 } else {
8221                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8222                              BDINFO_FLAGS_DISABLED);
8223                 }
8224
8225                 if (tg3_flag(tp, 57765_PLUS)) {
8226                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8227                                 val = TG3_RX_STD_MAX_SIZE_5700;
8228                         else
8229                                 val = TG3_RX_STD_MAX_SIZE_5717;
8230                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8231                         val |= (TG3_RX_STD_DMA_SZ << 2);
8232                 } else
8233                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8234         } else
8235                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8236
8237         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8238
8239         tpr->rx_std_prod_idx = tp->rx_pending;
8240         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8241
8242         tpr->rx_jmb_prod_idx =
8243                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8244         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8245
8246         tg3_rings_reset(tp);
8247
8248         /* Initialize MAC address and backoff seed. */
8249         __tg3_set_mac_addr(tp, 0);
8250
8251         /* MTU + ethernet header + FCS + optional VLAN tag */
8252         tw32(MAC_RX_MTU_SIZE,
8253              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8254
8255         /* The slot time is changed by tg3_setup_phy if we
8256          * run at gigabit with half duplex.
8257          */
8258         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8259               (6 << TX_LENGTHS_IPG_SHIFT) |
8260               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8261
8262         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8263                 val |= tr32(MAC_TX_LENGTHS) &
8264                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8265                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8266
8267         tw32(MAC_TX_LENGTHS, val);
8268
8269         /* Receive rules. */
8270         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8271         tw32(RCVLPC_CONFIG, 0x0181);
8272
8273         /* Calculate RDMAC_MODE setting early, we need it to determine
8274          * the RCVLPC_STATE_ENABLE mask.
8275          */
8276         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8277                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8278                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8279                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8280                       RDMAC_MODE_LNGREAD_ENAB);
8281
8282         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8283                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8284
8285         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8286             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8287             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8288                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8289                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8290                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8291
8292         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8293             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8294                 if (tg3_flag(tp, TSO_CAPABLE) &&
8295                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8296                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8297                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8298                            !tg3_flag(tp, IS_5788)) {
8299                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8300                 }
8301         }
8302
8303         if (tg3_flag(tp, PCI_EXPRESS))
8304                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8305
8306         if (tg3_flag(tp, HW_TSO_1) ||
8307             tg3_flag(tp, HW_TSO_2) ||
8308             tg3_flag(tp, HW_TSO_3))
8309                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8310
8311         if (tg3_flag(tp, 57765_PLUS) ||
8312             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8313             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8314                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8315
8316         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8317                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8318
8319         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8320             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8321             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8322             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8323             tg3_flag(tp, 57765_PLUS)) {
8324                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8325                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8326                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8327                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8328                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8329                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8330                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8331                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8332                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8333                 }
8334                 tw32(TG3_RDMA_RSRVCTRL_REG,
8335                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8336         }
8337
8338         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8339             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8340                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8341                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8342                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8343                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8344         }
8345
8346         /* Receive/send statistics. */
8347         if (tg3_flag(tp, 5750_PLUS)) {
8348                 val = tr32(RCVLPC_STATS_ENABLE);
8349                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8350                 tw32(RCVLPC_STATS_ENABLE, val);
8351         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8352                    tg3_flag(tp, TSO_CAPABLE)) {
8353                 val = tr32(RCVLPC_STATS_ENABLE);
8354                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8355                 tw32(RCVLPC_STATS_ENABLE, val);
8356         } else {
8357                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8358         }
8359         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8360         tw32(SNDDATAI_STATSENAB, 0xffffff);
8361         tw32(SNDDATAI_STATSCTRL,
8362              (SNDDATAI_SCTRL_ENABLE |
8363               SNDDATAI_SCTRL_FASTUPD));
8364
8365         /* Setup host coalescing engine. */
8366         tw32(HOSTCC_MODE, 0);
8367         for (i = 0; i < 2000; i++) {
8368                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8369                         break;
8370                 udelay(10);
8371         }
8372
8373         __tg3_set_coalesce(tp, &tp->coal);
8374
8375         if (!tg3_flag(tp, 5705_PLUS)) {
8376                 /* Status/statistics block address.  See tg3_timer,
8377                  * the tg3_periodic_fetch_stats call there, and
8378                  * tg3_get_stats to see how this works for 5705/5750 chips.
8379                  */
8380                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8381                      ((u64) tp->stats_mapping >> 32));
8382                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8383                      ((u64) tp->stats_mapping & 0xffffffff));
8384                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8385
8386                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8387
8388                 /* Clear statistics and status block memory areas */
8389                 for (i = NIC_SRAM_STATS_BLK;
8390                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8391                      i += sizeof(u32)) {
8392                         tg3_write_mem(tp, i, 0);
8393                         udelay(40);
8394                 }
8395         }
8396
8397         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8398
8399         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8400         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8401         if (!tg3_flag(tp, 5705_PLUS))
8402                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8403
8404         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8405                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8406                 /* reset to prevent losing 1st rx packet intermittently */
8407                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8408                 udelay(10);
8409         }
8410
8411         if (tg3_flag(tp, ENABLE_APE))
8412                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8413         else
8414                 tp->mac_mode = 0;
8415         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8416                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8417         if (!tg3_flag(tp, 5705_PLUS) &&
8418             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8419             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8420                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8421         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8422         udelay(40);
8423
8424         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8425          * If TG3_FLAG_IS_NIC is zero, we should read the
8426          * register to preserve the GPIO settings for LOMs. The GPIOs,
8427          * whether used as inputs or outputs, are set by boot code after
8428          * reset.
8429          */
8430         if (!tg3_flag(tp, IS_NIC)) {
8431                 u32 gpio_mask;
8432
8433                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8434                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8435                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8436
8437                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8438                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8439                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8440
8441                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8442                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8443
8444                 tp->grc_local_ctrl &= ~gpio_mask;
8445                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8446
8447                 /* GPIO1 must be driven high for eeprom write protect */
8448                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8449                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8450                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8451         }
8452         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8453         udelay(100);
8454
8455         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8456                 val = tr32(MSGINT_MODE);
8457                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8458                 tw32(MSGINT_MODE, val);
8459         }
8460
8461         if (!tg3_flag(tp, 5705_PLUS)) {
8462                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8463                 udelay(40);
8464         }
8465
8466         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8467                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8468                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8469                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8470                WDMAC_MODE_LNGREAD_ENAB);
8471
8472         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8473             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8474                 if (tg3_flag(tp, TSO_CAPABLE) &&
8475                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8476                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8477                         /* nothing */
8478                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8479                            !tg3_flag(tp, IS_5788)) {
8480                         val |= WDMAC_MODE_RX_ACCEL;
8481                 }
8482         }
8483
8484         /* Enable host coalescing bug fix */
8485         if (tg3_flag(tp, 5755_PLUS))
8486                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8487
8488         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8489                 val |= WDMAC_MODE_BURST_ALL_DATA;
8490
8491         tw32_f(WDMAC_MODE, val);
8492         udelay(40);
8493
8494         if (tg3_flag(tp, PCIX_MODE)) {
8495                 u16 pcix_cmd;
8496
8497                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8498                                      &pcix_cmd);
8499                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8500                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8501                         pcix_cmd |= PCI_X_CMD_READ_2K;
8502                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8503                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8504                         pcix_cmd |= PCI_X_CMD_READ_2K;
8505                 }
8506                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8507                                       pcix_cmd);
8508         }
8509
8510         tw32_f(RDMAC_MODE, rdmac_mode);
8511         udelay(40);
8512
8513         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8514         if (!tg3_flag(tp, 5705_PLUS))
8515                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8516
8517         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8518                 tw32(SNDDATAC_MODE,
8519                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8520         else
8521                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8522
8523         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8524         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8525         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8526         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8527                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8528         tw32(RCVDBDI_MODE, val);
8529         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8530         if (tg3_flag(tp, HW_TSO_1) ||
8531             tg3_flag(tp, HW_TSO_2) ||
8532             tg3_flag(tp, HW_TSO_3))
8533                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8534         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8535         if (tg3_flag(tp, ENABLE_TSS))
8536                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8537         tw32(SNDBDI_MODE, val);
8538         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8539
8540         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8541                 err = tg3_load_5701_a0_firmware_fix(tp);
8542                 if (err)
8543                         return err;
8544         }
8545
8546         if (tg3_flag(tp, TSO_CAPABLE)) {
8547                 err = tg3_load_tso_firmware(tp);
8548                 if (err)
8549                         return err;
8550         }
8551
8552         tp->tx_mode = TX_MODE_ENABLE;
8553
8554         if (tg3_flag(tp, 5755_PLUS) ||
8555             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8556                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8557
8558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8559                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8560                 tp->tx_mode &= ~val;
8561                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8562         }
8563
8564         tw32_f(MAC_TX_MODE, tp->tx_mode);
8565         udelay(100);
8566
8567         if (tg3_flag(tp, ENABLE_RSS)) {
8568                 u32 reg = MAC_RSS_INDIR_TBL_0;
8569                 u8 *ent = (u8 *)&val;
8570
8571                 /* Setup the indirection table */
8572                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8573                         int idx = i % sizeof(val);
8574
8575                         ent[idx] = i % (tp->irq_cnt - 1);
8576                         if (idx == sizeof(val) - 1) {
8577                                 tw32(reg, val);
8578                                 reg += 4;
8579                         }
8580                 }
8581
8582                 /* Setup the "secret" hash key. */
8583                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8584                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8585                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8586                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8587                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8588                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8589                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8590                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8591                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8592                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8593         }
8594
8595         tp->rx_mode = RX_MODE_ENABLE;
8596         if (tg3_flag(tp, 5755_PLUS))
8597                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8598
8599         if (tg3_flag(tp, ENABLE_RSS))
8600                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8601                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8602                                RX_MODE_RSS_IPV6_HASH_EN |
8603                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8604                                RX_MODE_RSS_IPV4_HASH_EN |
8605                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8606
8607         tw32_f(MAC_RX_MODE, tp->rx_mode);
8608         udelay(10);
8609
8610         tw32(MAC_LED_CTRL, tp->led_ctrl);
8611
8612         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8613         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8614                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8615                 udelay(10);
8616         }
8617         tw32_f(MAC_RX_MODE, tp->rx_mode);
8618         udelay(10);
8619
8620         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8621                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8622                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8623                         /* Set drive transmission level to 1.2V  */
8624                         /* only if the signal pre-emphasis bit is not set  */
8625                         val = tr32(MAC_SERDES_CFG);
8626                         val &= 0xfffff000;
8627                         val |= 0x880;
8628                         tw32(MAC_SERDES_CFG, val);
8629                 }
8630                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8631                         tw32(MAC_SERDES_CFG, 0x616000);
8632         }
8633
8634         /* Prevent chip from dropping frames when flow control
8635          * is enabled.
8636          */
8637         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8638                 val = 1;
8639         else
8640                 val = 2;
8641         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8642
8643         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8644             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8645                 /* Use hardware link auto-negotiation */
8646                 tg3_flag_set(tp, HW_AUTONEG);
8647         }
8648
8649         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8650             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8651                 u32 tmp;
8652
8653                 tmp = tr32(SERDES_RX_CTRL);
8654                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8655                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8656                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8657                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8658         }
8659
8660         if (!tg3_flag(tp, USE_PHYLIB)) {
8661                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8662                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8663                         tp->link_config.speed = tp->link_config.orig_speed;
8664                         tp->link_config.duplex = tp->link_config.orig_duplex;
8665                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8666                 }
8667
8668                 err = tg3_setup_phy(tp, 0);
8669                 if (err)
8670                         return err;
8671
8672                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8673                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8674                         u32 tmp;
8675
8676                         /* Clear CRC stats. */
8677                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8678                                 tg3_writephy(tp, MII_TG3_TEST1,
8679                                              tmp | MII_TG3_TEST1_CRC_EN);
8680                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8681                         }
8682                 }
8683         }
8684
8685         __tg3_set_rx_mode(tp->dev);
8686
8687         /* Initialize receive rules. */
8688         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8689         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8690         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8691         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8692
8693         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8694                 limit = 8;
8695         else
8696                 limit = 16;
8697         if (tg3_flag(tp, ENABLE_ASF))
8698                 limit -= 4;
8699         switch (limit) {
8700         case 16:
8701                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8702         case 15:
8703                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8704         case 14:
8705                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8706         case 13:
8707                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8708         case 12:
8709                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8710         case 11:
8711                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8712         case 10:
8713                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8714         case 9:
8715                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8716         case 8:
8717                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8718         case 7:
8719                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8720         case 6:
8721                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8722         case 5:
8723                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8724         case 4:
8725                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8726         case 3:
8727                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8728         case 2:
8729         case 1:
8730
8731         default:
8732                 break;
8733         }
8734
8735         if (tg3_flag(tp, ENABLE_APE))
8736                 /* Write our heartbeat update interval to APE. */
8737                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8738                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8739
8740         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8741
8742         return 0;
8743 }
8744
8745 /* Called at device open time to get the chip ready for
8746  * packet processing.  Invoked with tp->lock held.
8747  */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
        /* Select the chip clock configuration before touching the
         * rest of the hardware.
         */
        tg3_switch_clocks(tp);

        /* Reset the indirect memory window base so subsequent window
         * accesses start from a known offset.
         */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* The heavy lifting (and the return code) comes from the full
         * hardware reset/bring-up path.
         */
        return tg3_reset_hw(tp, reset_phy);
}
8756
/* Add the current 32-bit value of hardware counter register REG into
 * the 64-bit statistic PSTAT (a {high,low} pair), carrying into the
 * high word when the low word wraps.
 * NOTE(review): repeated accumulation presumes the chip counter is
 * effectively a delta per poll (e.g. clear-on-read) -- confirm against
 * the register spec.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
8763
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement
 * statistics counters into the 64-bit software copies in tp->hw_stats.
 * Called from the once-per-second section of tg3_timer(); skipped
 * entirely while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        if (!netif_carrier_ok(tp->dev))
                return;

        /* Transmit-side MAC counters. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* Receive-side MAC counters. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
                TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        } else {
                /* On 5717, 5719-A0 and 5720-A0, rx_discards is derived
                 * from the host-coalescing mbuf low-watermark attention
                 * bit instead of RCVLPC_IN_DISCARDS_CNT: count at most
                 * one event per poll, ack the attention, and mirror the
                 * running total into mbuf_lwm_thresh_hit.
                 */
                u32 val = tr32(HOSTCC_FLOW_ATTN);
                val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
                if (val) {
                        tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
                        sp->rx_discards.low += val;
                        if (sp->rx_discards.low < val)
                                sp->rx_discards.high += 1;
                }
                sp->mbuf_lwm_thresh_hit = sp->rx_discards;
        }
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
8818
/* Per-device periodic timer.  Re-arms itself every tp->timer_offset
 * jiffies.  Handles the non-tagged-status interrupt race workaround on
 * every tick, plus once-per-second stats/link polling and the
 * once-per-two-seconds ASF "driver alive" heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* While interrupts are being synchronized, do nothing but
         * re-arm the timer.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!tg3_flag(tp, TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
                }

                /* If the write DMA engine was disabled unexpectedly,
                 * schedule a full chip reset from process context.
                 */
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        tg3_flag_set(tp, RESTART_TIMER);
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tg3_flag(tp, 5705_PLUS))
                        tg3_periodic_fetch_stats(tp);

                /* Delayed EEE (low-power idle) enable countdown. */
                if (tp->setlpicnt && !--tp->setlpicnt)
                        tg3_phy_eee_enable(tp);

                if (tg3_flag(tp, USE_LINKCHG_REG)) {
                        /* Poll MAC_STATUS for link/PHY events instead of
                         * relying on a link-change interrupt.
                         */
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tg3_flag(tp, POLL_SERDES)) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (!netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Momentarily clear the port
                                         * mode bits, then restore them,
                                         * before re-running PHY setup.
                                         */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
                           tg3_flag(tp, 5780_CLASS)) {
                        tg3_serdes_parallel_detect(tp);
                }

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                        tg3_wait_for_event_ack(tp);

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
                                      TG3_FW_UPDATE_TIMEOUT_SEC);

                        tg3_generate_fw_event(tp);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
8942
8943 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8944 {
8945         irq_handler_t fn;
8946         unsigned long flags;
8947         char *name;
8948         struct tg3_napi *tnapi = &tp->napi[irq_num];
8949
8950         if (tp->irq_cnt == 1)
8951                 name = tp->dev->name;
8952         else {
8953                 name = &tnapi->irq_lbl[0];
8954                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8955                 name[IFNAMSIZ-1] = 0;
8956         }
8957
8958         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8959                 fn = tg3_msi;
8960                 if (tg3_flag(tp, 1SHOT_MSI))
8961                         fn = tg3_msi_1shot;
8962                 flags = 0;
8963         } else {
8964                 fn = tg3_interrupt;
8965                 if (tg3_flag(tp, TAGGED_STATUS))
8966                         fn = tg3_interrupt_tagged;
8967                 flags = IRQF_SHARED;
8968         }
8969
8970         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8971 }
8972
8973 static int tg3_test_interrupt(struct tg3 *tp)
8974 {
8975         struct tg3_napi *tnapi = &tp->napi[0];
8976         struct net_device *dev = tp->dev;
8977         int err, i, intr_ok = 0;
8978         u32 val;
8979
8980         if (!netif_running(dev))
8981                 return -ENODEV;
8982
8983         tg3_disable_ints(tp);
8984
8985         free_irq(tnapi->irq_vec, tnapi);
8986
8987         /*
8988          * Turn off MSI one shot mode.  Otherwise this test has no
8989          * observable way to know whether the interrupt was delivered.
8990          */
8991         if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8992                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8993                 tw32(MSGINT_MODE, val);
8994         }
8995
8996         err = request_irq(tnapi->irq_vec, tg3_test_isr,
8997                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8998         if (err)
8999                 return err;
9000
9001         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9002         tg3_enable_ints(tp);
9003
9004         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9005                tnapi->coal_now);
9006
9007         for (i = 0; i < 5; i++) {
9008                 u32 int_mbox, misc_host_ctrl;
9009
9010                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9011                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9012
9013                 if ((int_mbox != 0) ||
9014                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9015                         intr_ok = 1;
9016                         break;
9017                 }
9018
9019                 msleep(10);
9020         }
9021
9022         tg3_disable_ints(tp);
9023
9024         free_irq(tnapi->irq_vec, tnapi);
9025
9026         err = tg3_request_irq(tp, 0);
9027
9028         if (err)
9029                 return err;
9030
9031         if (intr_ok) {
9032                 /* Reenable MSI one shot mode. */
9033                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9034                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9035                         tw32(MSGINT_MODE, val);
9036                 }
9037                 return 0;
9038         }
9039
9040         return -EIO;
9041 }
9042
9043 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9044  * successfully restored
9045  */
static int tg3_test_msi(struct tg3 *tp)
{
        int err;
        u16 pci_cmd;

        /* Nothing to test unless MSI is actually in use. */
        if (!tg3_flag(tp, USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the saved PCI command word. */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
                    "to INTx mode. Please report this failure to the PCI "
                    "maintainer and include system chipset information\n");

        /* Tear down the MSI vector and fall back to the legacy
         * pin-based interrupt line.
         */
        free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        pci_disable_msi(tp->pdev);

        tg3_flag_clear(tp, USING_MSI);
        tp->napi[0].irq_vec = tp->pdev->irq;

        err = tg3_request_irq(tp, 0);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        /* On re-init failure there is nothing left to service the IRQ,
         * so release it before reporting the error.
         */
        if (err)
                free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        return err;
}
9103
/* Fetch the firmware image named by tp->fw_needed via the kernel
 * firmware loader and sanity-check its advertised length.  On success
 * tp->fw holds the image and tp->fw_needed is cleared; returns -ENOENT
 * if the image cannot be loaded, -EINVAL if its header is bogus.
 */
static int tg3_request_firmware(struct tg3 *tp)
{
        const __be32 *fw_data;

        if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
                netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
                           tp->fw_needed);
                return -ENOENT;
        }

        fw_data = (void *)tp->fw->data;

        /* Firmware blob starts with version numbers, followed by
         * start address and _full_ length including BSS sections
         * (which must be longer than the actual data, of course
         */

        tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
        /* The advertised length (which includes BSS) can never be
         * smaller than the payload following the 12-byte header.
         */
        if (tp->fw_len < (tp->fw->size - 12)) {
                netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
                           tp->fw_len, tp->fw_needed);
                release_firmware(tp->fw);
                tp->fw = NULL;
                return -EINVAL;
        }

        /* We no longer need firmware; we have it. */
        tp->fw_needed = NULL;
        return 0;
}
9134
/* Try to switch the device to MSI-X with one rx ring per online CPU
 * plus one extra vector (for link/misc interrupts), capped at
 * tp->irq_max.  Returns true when MSI-X is enabled and the real queue
 * counts are configured; false tells the caller to fall back to
 * MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
        int i, rc, cpus = num_online_cpus();
        struct msix_entry msix_ent[tp->irq_max];

        if (cpus == 1)
                /* Just fallback to the simpler MSI mode. */
                return false;

        /*
         * We want as many rx rings enabled as there are cpus.
         * The first MSIX vector only deals with link interrupts, etc,
         * so we add one to the number of vectors we are requesting.
         */
        tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

        for (i = 0; i < tp->irq_max; i++) {
                msix_ent[i].entry  = i;
                msix_ent[i].vector = 0;
        }

        rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
        if (rc < 0) {
                return false;
        } else if (rc != 0) {
                /* A positive return is the number of vectors that could
                 * be allocated; retry once with that reduced count.
                 */
                if (pci_enable_msix(tp->pdev, msix_ent, rc))
                        return false;
                netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
                              tp->irq_cnt, rc);
                tp->irq_cnt = rc;
        }

        /* Unused slots keep the 0 vector initialized above. */
        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].irq_vec = msix_ent[i].vector;

        netif_set_real_num_tx_queues(tp->dev, 1);
        rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
        if (netif_set_real_num_rx_queues(tp->dev, rc)) {
                pci_disable_msix(tp->pdev);
                return false;
        }

        if (tp->irq_cnt > 1) {
                tg3_flag_set(tp, ENABLE_RSS);

                /* 5719/5720 also support multiple tx queues (TSS). */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                        tg3_flag_set(tp, ENABLE_TSS);
                        netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
                }
        }

        return true;
}
9189
/* Choose the interrupt mode for the device -- MSI-X when possible,
 * then MSI, otherwise legacy INTx -- and program MSGINT_MODE to match.
 */
static void tg3_ints_init(struct tg3 *tp)
{
        if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
            !tg3_flag(tp, TAGGED_STATUS)) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                netdev_warn(tp->dev,
                            "MSI without TAGGED_STATUS? Not using MSI\n");
                goto defcfg;
        }

        if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
                tg3_flag_set(tp, USING_MSIX);
        else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
                tg3_flag_set(tp, USING_MSI);

        if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
                u32 msi_mode = tr32(MSGINT_MODE);
                if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
                        msi_mode |= MSGINT_MODE_MULTIVEC_EN;
                tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
        }
defcfg:
        /* Anything other than MSI-X (single-vector MSI or INTx) runs
         * with one vector and single tx/rx queues.
         */
        if (!tg3_flag(tp, USING_MSIX)) {
                tp->irq_cnt = 1;
                tp->napi[0].irq_vec = tp->pdev->irq;
                netif_set_real_num_tx_queues(tp->dev, 1);
                netif_set_real_num_rx_queues(tp->dev, 1);
        }
}
9221
9222 static void tg3_ints_fini(struct tg3 *tp)
9223 {
9224         if (tg3_flag(tp, USING_MSIX))
9225                 pci_disable_msix(tp->pdev);
9226         else if (tg3_flag(tp, USING_MSI))
9227                 pci_disable_msi(tp->pdev);
9228         tg3_flag_clear(tp, USING_MSI);
9229         tg3_flag_clear(tp, USING_MSIX);
9230         tg3_flag_clear(tp, ENABLE_RSS);
9231         tg3_flag_clear(tp, ENABLE_TSS);
9232 }
9233
9234 static int tg3_open(struct net_device *dev)
9235 {
9236         struct tg3 *tp = netdev_priv(dev);
9237         int i, err;
9238
9239         if (tp->fw_needed) {
9240                 err = tg3_request_firmware(tp);
9241                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9242                         if (err)
9243                                 return err;
9244                 } else if (err) {
9245                         netdev_warn(tp->dev, "TSO capability disabled\n");
9246                         tg3_flag_clear(tp, TSO_CAPABLE);
9247                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9248                         netdev_notice(tp->dev, "TSO capability restored\n");
9249                         tg3_flag_set(tp, TSO_CAPABLE);
9250                 }
9251         }
9252
9253         netif_carrier_off(tp->dev);
9254
9255         err = tg3_power_up(tp);
9256         if (err)
9257                 return err;
9258
9259         tg3_full_lock(tp, 0);
9260
9261         tg3_disable_ints(tp);
9262         tg3_flag_clear(tp, INIT_COMPLETE);
9263
9264         tg3_full_unlock(tp);
9265
9266         /*
9267          * Setup interrupts first so we know how
9268          * many NAPI resources to allocate
9269          */
9270         tg3_ints_init(tp);
9271
9272         /* The placement of this call is tied
9273          * to the setup and use of Host TX descriptors.
9274          */
9275         err = tg3_alloc_consistent(tp);
9276         if (err)
9277                 goto err_out1;
9278
9279         tg3_napi_init(tp);
9280
9281         tg3_napi_enable(tp);
9282
9283         for (i = 0; i < tp->irq_cnt; i++) {
9284                 struct tg3_napi *tnapi = &tp->napi[i];
9285                 err = tg3_request_irq(tp, i);
9286                 if (err) {
9287                         for (i--; i >= 0; i--)
9288                                 free_irq(tnapi->irq_vec, tnapi);
9289                         break;
9290                 }
9291         }
9292
9293         if (err)
9294                 goto err_out2;
9295
9296         tg3_full_lock(tp, 0);
9297
9298         err = tg3_init_hw(tp, 1);
9299         if (err) {
9300                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9301                 tg3_free_rings(tp);
9302         } else {
9303                 if (tg3_flag(tp, TAGGED_STATUS))
9304                         tp->timer_offset = HZ;
9305                 else
9306                         tp->timer_offset = HZ / 10;
9307
9308                 BUG_ON(tp->timer_offset > HZ);
9309                 tp->timer_counter = tp->timer_multiplier =
9310                         (HZ / tp->timer_offset);
9311                 tp->asf_counter = tp->asf_multiplier =
9312                         ((HZ / tp->timer_offset) * 2);
9313
9314                 init_timer(&tp->timer);
9315                 tp->timer.expires = jiffies + tp->timer_offset;
9316                 tp->timer.data = (unsigned long) tp;
9317                 tp->timer.function = tg3_timer;
9318         }
9319
9320         tg3_full_unlock(tp);
9321
9322         if (err)
9323                 goto err_out3;
9324
9325         if (tg3_flag(tp, USING_MSI)) {
9326                 err = tg3_test_msi(tp);
9327
9328                 if (err) {
9329                         tg3_full_lock(tp, 0);
9330                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9331                         tg3_free_rings(tp);
9332                         tg3_full_unlock(tp);
9333
9334                         goto err_out2;
9335                 }
9336
9337                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9338                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9339
9340                         tw32(PCIE_TRANSACTION_CFG,
9341                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9342                 }
9343         }
9344
9345         tg3_phy_start(tp);
9346
9347         tg3_full_lock(tp, 0);
9348
9349         add_timer(&tp->timer);
9350         tg3_flag_set(tp, INIT_COMPLETE);
9351         tg3_enable_ints(tp);
9352
9353         tg3_full_unlock(tp);
9354
9355         netif_tx_start_all_queues(dev);
9356
9357         /*
9358          * Reset loopback feature if it was turned on while the device was down
9359          * make sure that it's installed properly now.
9360          */
9361         if (dev->features & NETIF_F_LOOPBACK)
9362                 tg3_set_loopback(dev, dev->features);
9363
9364         return 0;
9365
9366 err_out3:
9367         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9368                 struct tg3_napi *tnapi = &tp->napi[i];
9369                 free_irq(tnapi->irq_vec, tnapi);
9370         }
9371
9372 err_out2:
9373         tg3_napi_disable(tp);
9374         tg3_napi_fini(tp);
9375         tg3_free_consistent(tp);
9376
9377 err_out1:
9378         tg3_ints_fini(tp);
9379         return err;
9380 }
9381
9382 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9383                                                  struct rtnl_link_stats64 *);
9384 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9385
/* ndo_stop hook: tear the device down in roughly the reverse order of
 * tg3_open().  Snapshot the hardware statistics before the DMA-coherent
 * memory backing them is freed so counters survive a close/open cycle.
 */
static int tg3_close(struct net_device *dev)
{
        int i;
        struct tg3 *tp = netdev_priv(dev);

        tg3_napi_disable(tp);
        /* reset_task also manipulates device state; make sure it is not
         * running while we dismantle everything below.
         */
        cancel_work_sync(&tp->reset_task);

        netif_tx_stop_all_queues(dev);

        del_timer_sync(&tp->timer);

        tg3_phy_stop(tp);

        /* irq_sync=1: synchronize with in-flight interrupts before locking */
        tg3_full_lock(tp, 1);

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

        tg3_ints_fini(tp);

        /* Preserve final counter values in the *_prev snapshots; they must
         * be captured before tg3_free_consistent() unmaps hw_stats.
         */
        tg3_get_stats64(tp->dev, &tp->net_stats_prev);

        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_napi_fini(tp);

        tg3_free_consistent(tp);

        tg3_power_down(tp);

        netif_carrier_off(tp->dev);

        return 0;
}
9432
9433 static inline u64 get_stat64(tg3_stat64_t *val)
9434 {
9435        return ((u64)val->high << 32) | ((u64)val->low);
9436 }
9437
/* Return the cumulative RX CRC error count.  On 5700/5701 copper parts
 * the count comes from the PHY's RXR counter register, accumulated in
 * software under tp->lock (presumably the register is clear-on-read,
 * since each sample is added to tp->phy_crc_errors — confirm against
 * the PHY datasheet).  All other chips report the MAC counter.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        /* Enable the CRC counter before sampling it */
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
9463
/* Accumulate one ethtool statistic: the snapshot saved at last close
 * plus the live hardware counter.  Relies on estats, old_estats and
 * hw_stats locals being in scope at the expansion site.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)
9467
/* Fill tp->estats with the current ethtool statistics: the snapshot
 * taken at the last close (estats_prev) plus the live hardware
 * counters.  When the statistics block is not mapped (device closed),
 * the previous snapshot is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        ESTAT_ADD(mbuf_lwm_thresh_hit);

        return estats;
}
9557
/* ndo_get_stats64 hook: fill stats with the snapshot saved at the last
 * close (net_stats_prev) plus the live hardware counters, mapping the
 * MAC's per-class counters onto the generic rtnl_link_stats64 fields.
 * Returns the previous snapshot unchanged when the statistics block is
 * unmapped (device closed).
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
                                                 struct rtnl_link_stats64 *stats)
{
        struct tg3 *tp = netdev_priv(dev);
        struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY rather than the MAC block */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        /* software-maintained counter (updated elsewhere in the driver) */
        stats->rx_dropped = tp->rx_dropped;

        return stats;
}
9619
/* Bitwise CRC-32 over @buf (reflected polynomial 0xedb88320, initial
 * value 0xffffffff, final complement) — the standard Ethernet CRC.
 * Used to derive multicast hash filter bit positions.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
        u32 crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];

                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }

        return ~crc;
}
9643
9644 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9645 {
9646         /* accept or reject all multicast frames */
9647         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9648         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9649         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9650         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9651 }
9652
9653 static void __tg3_set_rx_mode(struct net_device *dev)
9654 {
9655         struct tg3 *tp = netdev_priv(dev);
9656         u32 rx_mode;
9657
9658         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9659                                   RX_MODE_KEEP_VLAN_TAG);
9660
9661 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9662         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9663          * flag clear.
9664          */
9665         if (!tg3_flag(tp, ENABLE_ASF))
9666                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9667 #endif
9668
9669         if (dev->flags & IFF_PROMISC) {
9670                 /* Promiscuous mode. */
9671                 rx_mode |= RX_MODE_PROMISC;
9672         } else if (dev->flags & IFF_ALLMULTI) {
9673                 /* Accept all multicast. */
9674                 tg3_set_multi(tp, 1);
9675         } else if (netdev_mc_empty(dev)) {
9676                 /* Reject all multicast. */
9677                 tg3_set_multi(tp, 0);
9678         } else {
9679                 /* Accept one or more multicast(s). */
9680                 struct netdev_hw_addr *ha;
9681                 u32 mc_filter[4] = { 0, };
9682                 u32 regidx;
9683                 u32 bit;
9684                 u32 crc;
9685
9686                 netdev_for_each_mc_addr(ha, dev) {
9687                         crc = calc_crc(ha->addr, ETH_ALEN);
9688                         bit = ~crc & 0x7f;
9689                         regidx = (bit & 0x60) >> 5;
9690                         bit &= 0x1f;
9691                         mc_filter[regidx] |= (1 << bit);
9692                 }
9693
9694                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9695                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9696                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9697                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9698         }
9699
9700         if (rx_mode != tp->rx_mode) {
9701                 tp->rx_mode = rx_mode;
9702                 tw32_f(MAC_RX_MODE, rx_mode);
9703                 udelay(10);
9704         }
9705 }
9706
/* ndo_set_rx_mode hook: apply the receive filter under the full lock,
 * but only while the interface is up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_rx_mode(dev);
                tg3_full_unlock(tp);
        }
}
9718
/* ethtool get_regs_len hook: the register dump is always the fixed-size
 * legacy register block.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REG_BLK_SIZE;
}
9723
/* ethtool get_regs hook: dump the legacy register block into _p.  The
 * buffer is zeroed first so registers not written by the dump read as
 * zero; nothing is touched while the chip is in a low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        struct tg3 *tp = netdev_priv(dev);

        regs->version = 0;

        memset(_p, 0, TG3_REG_BLK_SIZE);

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return;

        tg3_full_lock(tp, 0);

        tg3_dump_legacy_regs(tp, (u32 *)_p);

        tg3_full_unlock(tp);
}
9742
/* ethtool get_eeprom_len hook: report the NVRAM size. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
9749
/* ethtool get_eeprom hook: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into data.  NVRAM is read in 4-byte words, so the
 * transfer is split into an unaligned head, an aligned middle and an
 * unaligned tail.  eeprom->len is updated to the number of bytes
 * actually copied, including on early error return.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, b_offset, b_count;
        __be32 val;

        if (tg3_flag(tp, NO_NVRAM))
                return -EINVAL;

        /* NVRAM is not accessible while the chip is in low-power state */
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                memcpy(data, ((char *)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes up to the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read_be32(tp, offset + i, &val);
                if (ret) {
                        eeprom->len += i;
                        return ret;
                }
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read_be32(tp, b_offset, &val);
                if (ret)
                        return ret;
                memcpy(pd, &val, b_count);
                eeprom->len += b_count;
        }
        return 0;
}
9812
9813 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9814
/* ethtool set_eeprom hook: write eeprom->len bytes to NVRAM at
 * eeprom->offset.  NVRAM is written in 4-byte units, so when the span
 * is unaligned at either end the bordering word(s) are read back first
 * and merged with the caller's data in a bounce buffer, preserving the
 * neighbouring bytes.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u32 offset, len, b_offset, odd_len;
        u8 *buf;
        __be32 start, end;

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;

        if (tg3_flag(tp, NO_NVRAM) ||
            eeprom->magic != TG3_EEPROM_MAGIC)
                return -EINVAL;

        offset = eeprom->offset;
        len = eeprom->len;

        if ((b_offset = (offset & 3))) {
                /* adjustments to start on required 4 byte boundary */
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
                if (ret)
                        return ret;
                len += b_offset;
                offset &= ~3;
                if (len < 4)
                        len = 4;
        }

        odd_len = 0;
        if (len & 3) {
                /* adjustments to end on required 4 byte boundary */
                odd_len = 1;
                len = (len + 3) & ~3;
                ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
                if (ret)
                        return ret;
        }

        buf = data;
        if (b_offset || odd_len) {
                /* bounce buffer: original word(s) at the edges, caller's
                 * data in the middle
                 */
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                if (b_offset)
                        memcpy(buf, &start, 4);
                if (odd_len)
                        memcpy(buf+len-4, &end, 4);
                memcpy(buf + b_offset, data, eeprom->len);
        }

        ret = tg3_nvram_write_block(tp, offset, len, buf);

        if (buf != data)
                kfree(buf);

        return ret;
}
9873
9874 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9875 {
9876         struct tg3 *tp = netdev_priv(dev);
9877
9878         if (tg3_flag(tp, USE_PHYLIB)) {
9879                 struct phy_device *phydev;
9880                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9881                         return -EAGAIN;
9882                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9883                 return phy_ethtool_gset(phydev, cmd);
9884         }
9885
9886         cmd->supported = (SUPPORTED_Autoneg);
9887
9888         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9889                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9890                                    SUPPORTED_1000baseT_Full);
9891
9892         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9893                 cmd->supported |= (SUPPORTED_100baseT_Half |
9894                                   SUPPORTED_100baseT_Full |
9895                                   SUPPORTED_10baseT_Half |
9896                                   SUPPORTED_10baseT_Full |
9897                                   SUPPORTED_TP);
9898                 cmd->port = PORT_TP;
9899         } else {
9900                 cmd->supported |= SUPPORTED_FIBRE;
9901                 cmd->port = PORT_FIBRE;
9902         }
9903
9904         cmd->advertising = tp->link_config.advertising;
9905         if (netif_running(dev)) {
9906                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9907                 cmd->duplex = tp->link_config.active_duplex;
9908         } else {
9909                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9910                 cmd->duplex = DUPLEX_INVALID;
9911         }
9912         cmd->phy_address = tp->phy_addr;
9913         cmd->transceiver = XCVR_INTERNAL;
9914         cmd->autoneg = tp->link_config.autoneg;
9915         cmd->maxtxpkt = 0;
9916         cmd->maxrxpkt = 0;
9917         return 0;
9918 }
9919
/* ethtool set_settings hook: validate and record the requested link
 * configuration, then renegotiate if the interface is running.  Serdes
 * (fibre) parts only accept forced 1000/full; copper parts accept
 * forced 10 or 100.  Delegates to phylib when it owns the PHY.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 speed = ethtool_cmd_speed(cmd);

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_sset(phydev, cmd);
        }

        if (cmd->autoneg != AUTONEG_ENABLE &&
            cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_DISABLE &&
            cmd->duplex != DUPLEX_FULL &&
            cmd->duplex != DUPLEX_HALF)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                /* Build the set of advertisement bits this device can
                 * accept and reject any request outside it.
                 */
                u32 mask = ADVERTISED_Autoneg |
                           ADVERTISED_Pause |
                           ADVERTISED_Asym_Pause;

                if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                        mask |= ADVERTISED_1000baseT_Half |
                                ADVERTISED_1000baseT_Full;

                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                        mask |= ADVERTISED_100baseT_Half |
                                ADVERTISED_100baseT_Full |
                                ADVERTISED_10baseT_Half |
                                ADVERTISED_10baseT_Full |
                                ADVERTISED_TP;
                else
                        mask |= ADVERTISED_FIBRE;

                if (cmd->advertising & ~mask)
                        return -EINVAL;

                /* Keep only the speed/duplex advertisement bits */
                mask &= (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
                         ADVERTISED_100baseT_Half |
                         ADVERTISED_100baseT_Full |
                         ADVERTISED_10baseT_Half |
                         ADVERTISED_10baseT_Full);

                cmd->advertising &= mask;
        } else {
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
                        if (speed != SPEED_1000)
                                return -EINVAL;

                        if (cmd->duplex != DUPLEX_FULL)
                                return -EINVAL;
                } else {
                        if (speed != SPEED_100 &&
                            speed != SPEED_10)
                                return -EINVAL;
                }
        }

        tg3_full_lock(tp, 0);

        tp->link_config.autoneg = cmd->autoneg;
        if (cmd->autoneg == AUTONEG_ENABLE) {
                tp->link_config.advertising = (cmd->advertising |
                                              ADVERTISED_Autoneg);
                tp->link_config.speed = SPEED_INVALID;
                tp->link_config.duplex = DUPLEX_INVALID;
        } else {
                tp->link_config.advertising = 0;
                tp->link_config.speed = speed;
                tp->link_config.duplex = cmd->duplex;
        }

        /* NOTE(review): the orig_* fields presumably preserve the request
         * across chip resets — confirm against the reset path.
         */
        tp->link_config.orig_speed = tp->link_config.speed;
        tp->link_config.orig_duplex = tp->link_config.duplex;
        tp->link_config.orig_autoneg = tp->link_config.autoneg;

        if (netif_running(dev))
                tg3_setup_phy(tp, 1);

        tg3_full_unlock(tp);

        return 0;
}
10010
10011 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10012 {
10013         struct tg3 *tp = netdev_priv(dev);
10014
10015         strcpy(info->driver, DRV_MODULE_NAME);
10016         strcpy(info->version, DRV_MODULE_VERSION);
10017         strcpy(info->fw_version, tp->fw_ver);
10018         strcpy(info->bus_info, pci_name(tp->pdev));
10019 }
10020
10021 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10022 {
10023         struct tg3 *tp = netdev_priv(dev);
10024
10025         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10026                 wol->supported = WAKE_MAGIC;
10027         else
10028                 wol->supported = 0;
10029         wol->wolopts = 0;
10030         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10031                 wol->wolopts = WAKE_MAGIC;
10032         memset(&wol->sopass, 0, sizeof(wol->sopass));
10033 }
10034
10035 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10036 {
10037         struct tg3 *tp = netdev_priv(dev);
10038         struct device *dp = &tp->pdev->dev;
10039
10040         if (wol->wolopts & ~WAKE_MAGIC)
10041                 return -EINVAL;
10042         if ((wol->wolopts & WAKE_MAGIC) &&
10043             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10044                 return -EINVAL;
10045
10046         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10047
10048         spin_lock_bh(&tp->lock);
10049         if (device_may_wakeup(dp))
10050                 tg3_flag_set(tp, WOL_ENABLE);
10051         else
10052                 tg3_flag_clear(tp, WOL_ENABLE);
10053         spin_unlock_bh(&tp->lock);
10054
10055         return 0;
10056 }
10057
/* ethtool get_msglevel hook: report the driver's message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        return tp->msg_enable;
}
10063
/* ethtool set_msglevel hook: set the driver's message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);
        tp->msg_enable = value;
}
10069
/* ethtool nway_reset hook: restart link autonegotiation.  Fails when
 * the interface is down, on serdes parts, or when autoneg is not
 * currently enabled (unless parallel detection is active).
 */
static int tg3_nway_reset(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int r;

        if (!netif_running(dev))
                return -EAGAIN;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                return -EINVAL;

        if (tg3_flag(tp, USE_PHYLIB)) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
        } else {
                u32 bmcr;

                spin_lock_bh(&tp->lock);
                r = -EINVAL;
                /* NOTE(review): BMCR is read twice; the first read looks
                 * like a deliberate flush of latched status before the
                 * real read — confirm before "simplifying" it away.
                 */
                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
                    ((bmcr & BMCR_ANENABLE) ||
                     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                                   BMCR_ANENABLE);
                        r = 0;
                }
                spin_unlock_bh(&tp->lock);
        }

        return r;
}
10103
10104 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10105 {
10106         struct tg3 *tp = netdev_priv(dev);
10107
10108         ering->rx_max_pending = tp->rx_std_ring_mask;
10109         ering->rx_mini_max_pending = 0;
10110         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10111                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10112         else
10113                 ering->rx_jumbo_max_pending = 0;
10114
10115         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10116
10117         ering->rx_pending = tp->rx_pending;
10118         ering->rx_mini_pending = 0;
10119         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10120                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10121         else
10122                 ering->rx_jumbo_pending = 0;
10123
10124         ering->tx_pending = tp->napi[0].tx_pending;
10125 }
10126
/* ethtool set_ringparam hook: resize the RX/TX rings within hardware
 * limits.  The TX ring must exceed the maximum number of fragments per
 * skb (three times that on TSO_BUG parts).  If the interface is up, the
 * device is halted, rings rebuilt, and the hardware restarted.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct tg3 *tp = netdev_priv(dev);
        int i, irq_sync = 0, err = 0;

        if ((ering->rx_pending > tp->rx_std_ring_mask) ||
            (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
            (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
            (ering->tx_pending <= MAX_SKB_FRAGS) ||
            (tg3_flag(tp, TSO_BUG) &&
             (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
                return -EINVAL;

        if (netif_running(dev)) {
                tg3_phy_stop(tp);
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        tp->rx_pending = ering->rx_pending;

        /* MAX_RXPEND_64 parts are clamped to 63 pending standard RX BDs */
        if (tg3_flag(tp, MAX_RXPEND_64) &&
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;

        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].tx_pending = ering->tx_pending;

        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        if (irq_sync && !err)
                tg3_phy_start(tp);

        return err;
}
10172
10173 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10174 {
10175         struct tg3 *tp = netdev_priv(dev);
10176
10177         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10178
10179         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10180                 epause->rx_pause = 1;
10181         else
10182                 epause->rx_pause = 0;
10183
10184         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10185                 epause->tx_pause = 1;
10186         else
10187                 epause->tx_pause = 0;
10188 }
10189
/* tg3_set_pauseparam() - ethtool set_pauseparam hook.
 *
 * Two paths: when the device is driven through phylib (USE_PHYLIB), the
 * requested rx/tx pause settings are translated into 802.3 PAUSE /
 * ASYM_PAUSE advertisement bits and the PHY renegotiates; otherwise the
 * driver's flow-control flags are updated directly and the hardware is
 * restarted if the interface is up.  Returns 0 or a negative errno (may
 * also propagate phy_start_aneg()'s result).
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx pause requires Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map the rx/tx pause request onto the advertisement bits. */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			/* Autoneg off: force the flow control state now. */
			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet; stash the pause advertisement
			 * so it is applied when the PHY connects.
			 */
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		/* Legacy (non-phylib) path: update the flags under the full
		 * lock and restart the hardware if the interface is up.
		 */
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
10291
10292 static int tg3_get_sset_count(struct net_device *dev, int sset)
10293 {
10294         switch (sset) {
10295         case ETH_SS_TEST:
10296                 return TG3_NUM_TEST;
10297         case ETH_SS_STATS:
10298                 return TG3_NUM_STATS;
10299         default:
10300                 return -EOPNOTSUPP;
10301         }
10302 }
10303
10304 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10305 {
10306         switch (stringset) {
10307         case ETH_SS_STATS:
10308                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10309                 break;
10310         case ETH_SS_TEST:
10311                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10312                 break;
10313         default:
10314                 WARN_ON(1);     /* we need a WARN() */
10315                 break;
10316         }
10317 }
10318
10319 static int tg3_set_phys_id(struct net_device *dev,
10320                             enum ethtool_phys_id_state state)
10321 {
10322         struct tg3 *tp = netdev_priv(dev);
10323
10324         if (!netif_running(tp->dev))
10325                 return -EAGAIN;
10326
10327         switch (state) {
10328         case ETHTOOL_ID_ACTIVE:
10329                 return 1;       /* cycle on/off once per second */
10330
10331         case ETHTOOL_ID_ON:
10332                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10333                      LED_CTRL_1000MBPS_ON |
10334                      LED_CTRL_100MBPS_ON |
10335                      LED_CTRL_10MBPS_ON |
10336                      LED_CTRL_TRAFFIC_OVERRIDE |
10337                      LED_CTRL_TRAFFIC_BLINK |
10338                      LED_CTRL_TRAFFIC_LED);
10339                 break;
10340
10341         case ETHTOOL_ID_OFF:
10342                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10343                      LED_CTRL_TRAFFIC_OVERRIDE);
10344                 break;
10345
10346         case ETHTOOL_ID_INACTIVE:
10347                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10348                 break;
10349         }
10350
10351         return 0;
10352 }
10353
/* ethtool get_ethtool_stats hook: refresh the driver's estats block via
 * tg3_get_estats() and copy it into the caller-supplied u64 array.
 */
static void tg3_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
10360
/* tg3_vpd_readblock() - read the adapter's VPD block into a kmalloc'ed
 * buffer.
 *
 * If the NVRAM carries the EEPROM magic, the NVM directory is scanned
 * for an extended-VPD entry to find the block's offset and length;
 * otherwise (or if no entry is found) the default VPD offset/length
 * are used.  The data is fetched through the NVRAM interface when the
 * EEPROM magic is present, else through the PCI VPD capability.
 * Returns the buffer (caller must kfree()) or NULL on any failure.
 */
static __be32 * tg3_vpd_readblock(struct tg3 *tp)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Walk the NVM directory looking for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length is in words; the physical
			 * offset follows the directory entry.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	/* No extended-VPD entry: fall back to the default VPD region. */
	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		/* No NVRAM access: read via the PCI VPD capability, making
		 * up to three attempts; timeouts and signals just retry.
		 */
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		/* Short read after three attempts is a failure. */
		if (pos != len)
			goto error;
	}

	return buf;

error:
	kfree(buf);
	return NULL;
}
10434
/* Image sizes for the various NVRAM formats tested below. */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* tg3_test_nvram() - ethtool self-test of the NVRAM contents.
 *
 * Determines the image size from the NVRAM magic/format, reads the
 * image, and verifies the format-specific integrity check: a simple
 * byte checksum for selfboot firmware images, per-byte parity for the
 * selfboot HW format, or CRC checksums plus the VPD checksum keyword
 * for the legacy EEPROM format.  Returns 0 on success, 0 for unknown
 * selfboot revisions (nothing to check), or a negative errno.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Pick the image size based on the detected NVRAM format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	/* Bail out if any word failed to read. */
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* The whole image must byte-sum to zero (mod 256). */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 hold 7 parity bits each. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Bytes 16-17 hold the remaining 14 bits. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte must match its recorded (odd) parity bit. */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp);
	if (!buf)
		return -ENOMEM;

	/* NOTE(review): tg3_vpd_readblock() may allocate fewer than
	 * TG3_NVM_VPD_LEN bytes when the NVM directory supplies its own
	 * length; scanning TG3_NVM_VPD_LEN here could then read past the
	 * end of the buffer -- verify against tg3_vpd_readblock().
	 */
	i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* The RO section must byte-sum to zero through the
			 * CHKSUM field.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
10612
10613 #define TG3_SERDES_TIMEOUT_SEC  2
10614 #define TG3_COPPER_TIMEOUT_SEC  6
10615
10616 static int tg3_test_link(struct tg3 *tp)
10617 {
10618         int i, max;
10619
10620         if (!netif_running(tp->dev))
10621                 return -ENODEV;
10622
10623         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10624                 max = TG3_SERDES_TIMEOUT_SEC;
10625         else
10626                 max = TG3_COPPER_TIMEOUT_SEC;
10627
10628         for (i = 0; i < max; i++) {
10629                 if (netif_carrier_ok(tp->dev))
10630                         return 0;
10631
10632                 if (msleep_interruptible(1000))
10633                         break;
10634         }
10635
10636         return -EIO;
10637 }
10638
/* Only test the commonly used registers */
/* tg3_test_registers() - ethtool self-test of register read/write bits.
 *
 * For every applicable entry in reg_tbl, saves the register, writes
 * all-zeros and then all-ones, and checks that the read-only bits
 * (read_mask) are unaffected while the read/write bits (write_mask)
 * take the written value.  The original value is restored in all
 * cases.  Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* applicability flags, see TG3_FL_* below */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },	/* sentinel */
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
10859
10860 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10861 {
10862         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10863         int i;
10864         u32 j;
10865
10866         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10867                 for (j = 0; j < len; j += 4) {
10868                         u32 val;
10869
10870                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10871                         tg3_read_mem(tp, offset + j, &val);
10872                         if (val != test_pattern[i])
10873                                 return -EIO;
10874                 }
10875         }
10876         return 0;
10877 }
10878
/* tg3_test_memory() - ethtool self-test of internal chip memory.
 *
 * Selects the per-chip table of internal memory ranges (offset/length
 * pairs, 0xffffffff-terminated) and runs tg3_do_mem_test() on each.
 * Returns 0 on success or the first failing range's error.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Most specific chip families are checked first; the order of
	 * this chain matters (e.g. 5717_PLUS before the 5755_PLUS test).
	 */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
10947
/* Loopback test variants exercised by tg3_run_loopback(). */
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1
#define TG3_TSO_LOOPBACK	2

/* MSS used for the canned TSO test frame below. */
#define TG3_TSO_MSS		500

/* Header geometry of the canned TSO frame: 20-byte IP header (no
 * options), 20-byte TCP header plus 12 bytes of TCP options.
 */
#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Template copied into the TSO loopback frame directly after the two
 * MAC addresses: ethertype 0x0800 (IPv4), a 20-byte IPv4 header
 * (0x45 = version 4, IHL 5), then a 32-byte TCP header (20 bytes plus
 * 12 bytes of options).  The IP total-length and checksum fields are
 * left zero; tg3_run_loopback() patches the length and either the
 * hardware or the TXD checksum flag handles the checksums.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
10974
/* tg3_run_loopback - send one frame to ourselves and verify it comes back.
 * @tp: device state
 * @pktsz: total frame length to send (also selects std vs jumbo rx ring)
 * @loopback_mode: TG3_MAC_LOOPBACK, TG3_PHY_LOOPBACK or TG3_TSO_LOOPBACK
 *
 * Configures the MAC (or the PHY) for internal loopback, builds a test
 * frame with a predictable byte pattern, queues it on the tx ring and
 * polls the status block until the frame(s) appear on the rx ring, then
 * validates the rx descriptors and payload.  Returns 0 on success,
 * -ENOMEM if the skb allocation fails, -EIO on any other failure.
 * Only called from the ethtool self-test path with the device halted
 * and re-initialized by tg3_test_loopback().
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* With RSS/TSS enabled the first data rings live on napi[1];
	 * otherwise everything goes through napi[0].
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.  Also, the MAC loopback test is deprecated for
		 * all newer ASIC revisions.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
		    tg3_flag(tp, CPMU_PRESENT))
			return 0;

		/* Loop frames back inside the MAC, full duplex. */
		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		if (!tg3_flag(tp, 5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else {
		/* PHY/TSO loopback: put the PHY itself into loopback at
		 * the fastest speed the PHY type supports.
		 */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			/* Force the FET transmitter to report link/lock. */
			tg3_writephy(tp, MII_TG3_FET_PTEST,
				     MII_TG3_FET_PTEST_FRC_TX_LINK |
				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
			/* The write needs to be flushed for the AC131 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
			/* Link polarity quirks for the 5401/5411 PHYs. */
			if (masked_phy_id == TG3_PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}
	}

	err = -EIO;

	/* Build the test frame: destination = our own MAC, zero source. */
	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (loopback_mode == TG3_TSO_LOOPBACK) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		/* Splice in the canned IP+TCP headers and work out how
		 * many segments the hardware should emit for this frame.
		 */
		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* HW TSO fills in the TCP checksum itself; zero
			 * the seed value in the template.
			 */
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Encode the header length into mss/base_flags in the
		 * layout each TSO hardware generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	/* Fill the payload with a predictable pattern to check on rx. */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	/* Snapshot the rx producer so we can tell when our frame lands. */
	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
		    base_flags, (mss << 1) | 1);

	tnapi->tx_prod++;

	/* Ring the tx doorbell and flush it with a read-back. */
	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Fail if the frame never completed on tx ... */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	/* ... or the expected number of frames never showed up on rx. */
	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk every rx completion produced by this test and verify the
	 * descriptor flags, the length, and the payload pattern.  val
	 * carries the expected pattern position across segments.
	 */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (loopback_mode != TG3_TSO_LOOPBACK) {
			if (rx_len != tx_len)
				goto out;

			/* The frame must arrive on the ring matching its
			 * size: std for small, jumbo for large frames.
			 */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_skb->data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
11234
/* Per-variant failure flags; tg3_test_loopback() packs them into its
 * return value as two nibbles, one per loopback mode (see the shifts
 * below).
 */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4

#define TG3_MAC_LOOPBACK_SHIFT		0
#define TG3_PHY_LOOPBACK_SHIFT		4
/* All three failure flags set in both the MAC and PHY nibbles. */
#define TG3_LOOPBACK_FAILED		0x00000077
11242
/* tg3_test_loopback - run the full set of loopback self-tests.
 * @tp: device state
 *
 * Resets the hardware, then runs the MAC loopback (standard and, if the
 * jumbo ring is enabled, jumbo sized), and on internal non-SERDES PHYs
 * the PHY and TSO loopbacks as well.  Returns 0 on success or a bitmask
 * of TG3_*_LOOPBACK_FAILED flags shifted by TG3_{MAC,PHY}_LOOPBACK_SHIFT.
 * Called from tg3_self_test() with the device quiesced under
 * tg3_full_lock().
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 eee_cap, cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	/* Mask off the EEE capability for the duration of the test; it
	 * is restored at "done".
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	err = tg3_reset_hw(tp, 1);
	if (err) {
		err = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* Turn off gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, false);

	if (tg3_flag(tp, CPMU_PRESENT)) {
		int i;
		u32 status;

		/* Request the CPMU mutex before touching CPMU_CTRL. */
		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER) {
			err = TG3_LOOPBACK_FAILED;
			goto done;
		}

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
		err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
	    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
		err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, CPMU_PRESENT)) {
		/* Restore the saved power-management mode. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* Skip PHY-level loopback when the PHY is SERDES or is managed
	 * by phylib rather than this driver.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
			err |= TG3_STD_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
			err |= TG3_TSO_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
			err |= TG3_JMB_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
	}

	/* Re-enable gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

done:
	tp->phy_flags |= eee_cap;

	return err;
}
11337
/* tg3_self_test - ethtool .self_test entry point.
 * @dev: net device under test
 * @etest: ethtool test control; ETH_TEST_FL_FAILED is set on any failure
 * @data: per-test results, TG3_NUM_TEST entries (nonzero = failed):
 *        [0] nvram, [1] link, [2] registers, [3] memory,
 *        [4] loopback (raw tg3_test_loopback() bitmask), [5] interrupt
 *
 * Always runs the nvram and link tests.  When ETH_TEST_FL_OFFLINE is
 * requested, the device is additionally stopped and halted, the
 * register/memory/loopback tests run under tg3_full_lock(), the
 * interrupt test runs outside the lock, and the hardware is then reset
 * and (if the interface was up) restarted.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Temporarily power the chip up if it was in low-power state;
	 * matched by tg3_power_down() at the end.
	 */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_up(tp);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip completely before poking at it. */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] keeps the raw loopback failure bitmask. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test runs without the full lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reset and, if the interface was up, bring it back. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
11415
11416 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11417 {
11418         struct mii_ioctl_data *data = if_mii(ifr);
11419         struct tg3 *tp = netdev_priv(dev);
11420         int err;
11421
11422         if (tg3_flag(tp, USE_PHYLIB)) {
11423                 struct phy_device *phydev;
11424                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11425                         return -EAGAIN;
11426                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11427                 return phy_mii_ioctl(phydev, ifr, cmd);
11428         }
11429
11430         switch (cmd) {
11431         case SIOCGMIIPHY:
11432                 data->phy_id = tp->phy_addr;
11433
11434                 /* fallthru */
11435         case SIOCGMIIREG: {
11436                 u32 mii_regval;
11437
11438                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11439                         break;                  /* We have no PHY */
11440
11441                 if (!netif_running(dev))
11442                         return -EAGAIN;
11443
11444                 spin_lock_bh(&tp->lock);
11445                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11446                 spin_unlock_bh(&tp->lock);
11447
11448                 data->val_out = mii_regval;
11449
11450                 return err;
11451         }
11452
11453         case SIOCSMIIREG:
11454                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11455                         break;                  /* We have no PHY */
11456
11457                 if (!netif_running(dev))
11458                         return -EAGAIN;
11459
11460                 spin_lock_bh(&tp->lock);
11461                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11462                 spin_unlock_bh(&tp->lock);
11463
11464                 return err;
11465
11466         default:
11467                 /* do nothing */
11468                 break;
11469         }
11470         return -EOPNOTSUPP;
11471 }
11472
11473 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11474 {
11475         struct tg3 *tp = netdev_priv(dev);
11476
11477         memcpy(ec, &tp->coal, sizeof(*ec));
11478         return 0;
11479 }
11480
11481 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11482 {
11483         struct tg3 *tp = netdev_priv(dev);
11484         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11485         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11486
11487         if (!tg3_flag(tp, 5705_PLUS)) {
11488                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11489                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11490                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11491                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11492         }
11493
11494         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11495             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11496             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11497             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11498             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11499             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11500             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11501             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11502             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11503             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11504                 return -EINVAL;
11505
11506         /* No rx interrupts will be generated if both are zero */
11507         if ((ec->rx_coalesce_usecs == 0) &&
11508             (ec->rx_max_coalesced_frames == 0))
11509                 return -EINVAL;
11510
11511         /* No tx interrupts will be generated if both are zero */
11512         if ((ec->tx_coalesce_usecs == 0) &&
11513             (ec->tx_max_coalesced_frames == 0))
11514                 return -EINVAL;
11515
11516         /* Only copy relevant parameters, ignore all others. */
11517         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11518         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11519         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11520         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11521         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11522         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11523         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11524         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11525         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11526
11527         if (netif_running(dev)) {
11528                 tg3_full_lock(tp, 0);
11529                 __tg3_set_coalesce(tp, &tp->coal);
11530                 tg3_full_unlock(tp);
11531         }
11532         return 0;
11533 }
11534
/* ethtool operations exported for this driver; installed on the net
 * device so userspace ethtool can query/configure the NIC.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
11562
11563 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11564 {
11565         u32 cursize, val, magic;
11566
11567         tp->nvram_size = EEPROM_CHIP_SIZE;
11568
11569         if (tg3_nvram_read(tp, 0, &magic) != 0)
11570                 return;
11571
11572         if ((magic != TG3_EEPROM_MAGIC) &&
11573             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11574             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11575                 return;
11576
11577         /*
11578          * Size the chip by reading offsets at increasing powers of two.
11579          * When we encounter our validation signature, we know the addressing
11580          * has wrapped around, and thus have our chip size.
11581          */
11582         cursize = 0x10;
11583
11584         while (cursize < tp->nvram_size) {
11585                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11586                         return;
11587
11588                 if (val == magic)
11589                         break;
11590
11591                 cursize <<= 1;
11592         }
11593
11594         tp->nvram_size = cursize;
11595 }
11596
11597 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11598 {
11599         u32 val;
11600
11601         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11602                 return;
11603
11604         /* Selfboot format */
11605         if (val != TG3_EEPROM_MAGIC) {
11606                 tg3_get_eeprom_size(tp);
11607                 return;
11608         }
11609
11610         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11611                 if (val != 0) {
11612                         /* This is confusing.  We want to operate on the
11613                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11614                          * call will read from NVRAM and byteswap the data
11615                          * according to the byteswapping settings for all
11616                          * other register accesses.  This ensures the data we
11617                          * want will always reside in the lower 16-bits.
11618                          * However, the data in NVRAM is in LE format, which
11619                          * means the data from the NVRAM read will always be
11620                          * opposite the endianness of the CPU.  The 16-bit
11621                          * byteswap then brings the data to CPU endianness.
11622                          */
11623                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11624                         return;
11625                 }
11626         }
11627         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11628 }
11629
/* tg3_get_nvram_info - decode NVRAM_CFG1 into NVRAM parameters.
 * @tp: device state
 *
 * Detects whether a flash interface is present (setting the FLASH
 * flag), and on 5750 / 5780-class chips decodes the vendor field into
 * the JEDEC id, page size and whether the part is buffered.  Other
 * chips default to a buffered Atmel AT45DB0X1B-style part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: clear the compatibility-bypass
		 * bit in NVRAM_CFG1.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Pre-5750, non-5780-class chips: assume buffered Atmel. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
11680
11681 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11682 {
11683         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11684         case FLASH_5752PAGE_SIZE_256:
11685                 tp->nvram_pagesize = 256;
11686                 break;
11687         case FLASH_5752PAGE_SIZE_512:
11688                 tp->nvram_pagesize = 512;
11689                 break;
11690         case FLASH_5752PAGE_SIZE_1K:
11691                 tp->nvram_pagesize = 1024;
11692                 break;
11693         case FLASH_5752PAGE_SIZE_2K:
11694                 tp->nvram_pagesize = 2048;
11695                 break;
11696         case FLASH_5752PAGE_SIZE_4K:
11697                 tp->nvram_pagesize = 4096;
11698                 break;
11699         case FLASH_5752PAGE_SIZE_264:
11700                 tp->nvram_pagesize = 264;
11701                 break;
11702         case FLASH_5752PAGE_SIZE_528:
11703                 tp->nvram_pagesize = 528;
11704                 break;
11705         }
11706 }
11707
/* Probe NVRAM type for 5752 devices.
 *
 * Reads NVRAM_CFG1 to set the JEDEC vendor id, the NVRAM_BUFFERED /
 * FLASH / PROTECTED_NVRAM flags and the page size in *tp.  The caller
 * (tg3_nvram_init) holds the NVRAM lock with NVRAM access enabled.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
11748
/* Probe NVRAM type for 5755 devices.
 *
 * Decodes the vendor field of NVRAM_CFG1 into the JEDEC id, the
 * NVRAM_BUFFERED/FLASH flags and the page size, and derives
 * tp->nvram_size from the exact part.  When the TPM protection bit
 * (27) is set, smaller sizes are reported (the magic 0x3e200/0x1f200
 * values come from Broadcom; presumably the unprotected region).
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
11804
/* Probe NVRAM type for 5787/5784/5785 devices from NVRAM_CFG1.
 *
 * EEPROM encodings get NVRAM_CFG1_COMPAT_BYPASS cleared and a
 * chip-sized "page"; flash encodings only record vendor, flags and
 * the fixed page size for the part family.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
11842
/* Probe NVRAM type for 5761 devices.
 *
 * In addition to vendor/flags/page size, determines tp->nvram_size:
 * when the TPM protection bit (27) is set, the size is read from the
 * NVRAM_ADDR_LOCKOUT register (presumably the first locked address);
 * otherwise it is inferred from the exact flash part.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: size follows directly from the part id. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
11917
11918 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11919 {
11920         tp->nvram_jedecnum = JEDEC_ATMEL;
11921         tg3_flag_set(tp, NVRAM_BUFFERED);
11922         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11923 }
11924
/* Probe NVRAM type for 57780/57765 devices from NVRAM_CFG1.
 *
 * EEPROM encodings return early after clearing COMPAT_BYPASS; flash
 * encodings also derive tp->nvram_size from the exact part and fall
 * through to the page-size decode.  Unknown encodings mark the
 * device as having no NVRAM at all.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Nested switch picks the total size per Atmel part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Nested switch picks the total size per ST part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only used with 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
11996
11997
/* Probe NVRAM type for 5717/5719 devices from NVRAM_CFG1.
 *
 * Same pattern as the 57780 probe: EEPROM encodings return early,
 * flash encodings record vendor/flags and a per-part size (some parts
 * leave nvram_size at 0 so tg3_nvram_init falls back to
 * tg3_get_nvram_size()), unknown encodings set NO_NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only used with 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12075
/* Probe NVRAM type for 5720 devices.
 *
 * The vendor/strap field of NVRAM_CFG1 selects between a plain EEPROM
 * (early return, COMPAT_BYPASS cleared, HD/LD variants differ only in
 * assumed chip size) and Atmel/ST flash parts, for which the nested
 * switches pick tp->nvram_size.  Unknown straps set NO_NVRAM.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only used with 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12187
/* Chips other than 5700/5701 use the NVRAM for fetching info.
 *
 * Resets the EEPROM state machine, enables SEEPROM access, then (for
 * NVRAM-capable chips) takes the NVRAM lock and dispatches to the
 * ASIC-specific probe that fills in jedecnum, flags, page size and
 * usually nvram_size.  5700/5701 fall back to a plain EEPROM probe.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	/* Reset the EEPROM FSM and program the default clock period. */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Probes that leave this at 0 want the size detected
		 * from the device by tg3_get_nvram_size() below.
		 */
		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: no NVRAM interface, plain serial EEPROM. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
12253
/* Write 'len' bytes from 'buf' at 'offset' to the serial EEPROM via
 * the GRC EEPROM state machine, one 32-bit word per transaction.
 * Each word is polled for completion (up to ~1s).  Returns 0 on
 * success, -EBUSY on timeout.  Caller guarantees dword alignment.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Clear any stale COMPLETE status (presumably
		 * write-one-to-clear) before starting this cycle.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, sleeping 1 ms between checks. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
12302
12303 /* offset and length are dword aligned */
12304 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12305                 u8 *buf)
12306 {
12307         int ret = 0;
12308         u32 pagesize = tp->nvram_pagesize;
12309         u32 pagemask = pagesize - 1;
12310         u32 nvram_cmd;
12311         u8 *tmp;
12312
12313         tmp = kmalloc(pagesize, GFP_KERNEL);
12314         if (tmp == NULL)
12315                 return -ENOMEM;
12316
12317         while (len) {
12318                 int j;
12319                 u32 phy_addr, page_off, size;
12320
12321                 phy_addr = offset & ~pagemask;
12322
12323                 for (j = 0; j < pagesize; j += 4) {
12324                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12325                                                   (__be32 *) (tmp + j));
12326                         if (ret)
12327                                 break;
12328                 }
12329                 if (ret)
12330                         break;
12331
12332                 page_off = offset & pagemask;
12333                 size = pagesize;
12334                 if (len < size)
12335                         size = len;
12336
12337                 len -= size;
12338
12339                 memcpy(tmp + page_off, buf, size);
12340
12341                 offset = offset + (pagesize - page_off);
12342
12343                 tg3_enable_nvram_access(tp);
12344
12345                 /*
12346                  * Before we can erase the flash page, we need
12347                  * to issue a special "write enable" command.
12348                  */
12349                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12350
12351                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12352                         break;
12353
12354                 /* Erase the target page */
12355                 tw32(NVRAM_ADDR, phy_addr);
12356
12357                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12358                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12359
12360                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12361                         break;
12362
12363                 /* Issue another write enable to start the write. */
12364                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12365
12366                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12367                         break;
12368
12369                 for (j = 0; j < pagesize; j += 4) {
12370                         __be32 data;
12371
12372                         data = *((__be32 *) (tmp + j));
12373
12374                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12375
12376                         tw32(NVRAM_ADDR, phy_addr + j);
12377
12378                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12379                                 NVRAM_CMD_WR;
12380
12381                         if (j == 0)
12382                                 nvram_cmd |= NVRAM_CMD_FIRST;
12383                         else if (j == (pagesize - 4))
12384                                 nvram_cmd |= NVRAM_CMD_LAST;
12385
12386                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12387                                 break;
12388                 }
12389                 if (ret)
12390                         break;
12391         }
12392
12393         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12394         tg3_nvram_exec_cmd(tp, nvram_cmd);
12395
12396         kfree(tmp);
12397
12398         return ret;
12399 }
12400
/* offset and length are dword aligned.
 *
 * Write 'len' bytes from 'buf' to buffered flash/EEPROM, one dword
 * per NVRAM command.  Returns 0 or the first tg3_nvram_exec_cmd()
 * error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Parts with address translation take a translated
		 * (page-based) address rather than the linear offset.
		 */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at a page boundary or at the very first word... */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		/* ...LAST at the end of a page... */
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ...and at the final word of the transfer. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Pre-5755 ST parts need an explicit write-enable
		 * command before each page program.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
12451
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	/* Top-level NVRAM write entry point: handles write-protect GPIO
	 * toggling, NVRAM locking and write-enable bracketing, then
	 * dispatches to the buffered or unbuffered write helper.
	 */
	int ret;

	/* De-assert GPIO output 1 while writing; on these boards it
	 * appears to drive the external EEPROM write-protect line —
	 * the original grc_local_ctrl value is restored afterwards.
	 */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		/* No NVRAM interface: fall back to direct EEPROM access. */
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		/* Serialize against firmware/other agents before touching
		 * the NVRAM interface.
		 */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes in GRC mode for the duration. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Restore the original local control value (re-asserts write
	 * protect if it was set).
	 */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
12501
/* One entry of the board-to-PHY lookup table: maps a PCI subsystem
 * vendor/device ID pair to the PHY ID expected on that board.  A
 * phy_id of 0 is treated by tg3_phy_probe() as a serdes device.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem IDs */
	u32 phy_id;				/* TG3_PHY_ID_* value, or 0 */
};
12506
/* Hardcoded PHY ID table, consulted by tg3_lookup_by_subsys() only
 * when the device carries no valid EEPROM signature.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
12570
12571 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12572 {
12573         int i;
12574
12575         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12576                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12577                      tp->pdev->subsystem_vendor) &&
12578                     (subsys_id_to_phy_id[i].subsys_devid ==
12579                      tp->pdev->subsystem_device))
12580                         return &subsys_id_to_phy_id[i];
12581         }
12582         return NULL;
12583 }
12584
/* Read the board configuration left in NIC SRAM (or, on 5906, in the
 * VCPU shadow registers) by the bootcode and derive driver state from
 * it: PHY ID and serdes type, LED mode, write-protect/NIC status,
 * ASF/APE enables, WOL capability and assorted PHY workaround flags.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults in case no valid config block is found below. */
	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	/* 5906 keeps the equivalent settings in VCPU shadow registers
	 * instead of the NIC SRAM config block; handle it and bail out.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	/* The SRAM config block is only trusted if the bootcode left the
	 * magic signature behind.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists for bootcode versions in (0, 0x100)
		 * on chips newer than 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM-encoded PHY ID into the driver's
		 * TG3_PHY_ID_* layout; a zero value means "unknown".
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		/* 5750+ parts carry extra (Shasta) LED mode bits in CFG_2. */
		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards always use PHY_2 LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		/* Onboard (LOM) devices stay write-protected, with an
		 * exception for two Arima subsystem IDs; everything else
		 * is a plug-in NIC.
		 */
		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes links only support WOL if the config says so. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Propagate the final WOL capability/enable state to the
	 * device model.
	 */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
12807
/* Issue one OTP controller command and poll for its completion.
 * Returns 0 on success, -EBUSY if the command did not finish within
 * the ~1 ms polling window.
 */
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	/* Pulse the START bit together with the command, then rewrite
	 * the command word without it.
	 */
	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
12826
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any OTP command fails (0 is not a valid config value).
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First read: the word holding the top half of the config. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second read: the word holding the bottom half. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the first word become the high half,
	 * high 16 bits of the second word become the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
12856
12857 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12858 {
12859         u32 adv = ADVERTISED_Autoneg |
12860                   ADVERTISED_Pause;
12861
12862         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12863                 adv |= ADVERTISED_1000baseT_Half |
12864                        ADVERTISED_1000baseT_Full;
12865
12866         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12867                 adv |= ADVERTISED_100baseT_Half |
12868                        ADVERTISED_100baseT_Full |
12869                        ADVERTISED_10baseT_Half |
12870                        ADVERTISED_10baseT_Full |
12871                        ADVERTISED_TP;
12872         else
12873                 adv |= ADVERTISED_FIBRE;
12874
12875         tp->link_config.advertising = adv;
12876         tp->link_config.speed = SPEED_INVALID;
12877         tp->link_config.duplex = DUPLEX_INVALID;
12878         tp->link_config.autoneg = AUTONEG_ENABLE;
12879         tp->link_config.active_speed = SPEED_INVALID;
12880         tp->link_config.active_duplex = DUPLEX_INVALID;
12881         tp->link_config.orig_speed = SPEED_INVALID;
12882         tp->link_config.orig_duplex = DUPLEX_INVALID;
12883         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12884 }
12885
/* Probe and identify the PHY attached to this device.  Tries, in order:
 * phylib (if enabled), the hardware MII ID registers, the ID left in
 * tp->phy_id by tg3_get_eeprom_hw_cfg(), and finally the hardcoded
 * subsystem-ID table.  Also seeds the default link configuration and,
 * for copper PHYs with no link, kicks off autonegotiation.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* When phylib manages the PHY, delegate the whole probe to it. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack PHYSID1/PHYSID2 into the driver's TG3_PHY_ID_*
		 * layout (same packing as in tg3_get_eeprom_hw_cfg()).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A zero table entry (or BCM8002) means serdes. */
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark EEE-capable copper devices (5718 past A0, 57765 past A0). */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		/* BMSR link status is latched; the first read clears a
		 * stale latched value, the second reflects current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* If the PHY is not already advertising everything we
		 * want, reprogram it and restart autonegotiation.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is issued a second time on
		 * success; presumably a deliberate retry for this PHY —
		 * confirm against Broadcom 5401 errata before changing.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
12997
12998 static void __devinit tg3_read_vpd(struct tg3 *tp)
12999 {
13000         u8 *vpd_data;
13001         unsigned int block_end, rosize, len;
13002         int j, i = 0;
13003
13004         vpd_data = (u8 *)tg3_vpd_readblock(tp);
13005         if (!vpd_data)
13006                 goto out_no_vpd;
13007
13008         i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13009                              PCI_VPD_LRDT_RO_DATA);
13010         if (i < 0)
13011                 goto out_not_found;
13012
13013         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13014         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13015         i += PCI_VPD_LRDT_TAG_SIZE;
13016
13017         if (block_end > TG3_NVM_VPD_LEN)
13018                 goto out_not_found;
13019
13020         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13021                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13022         if (j > 0) {
13023                 len = pci_vpd_info_field_size(&vpd_data[j]);
13024
13025                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13026                 if (j + len > block_end || len != 4 ||
13027                     memcmp(&vpd_data[j], "1028", 4))
13028                         goto partno;
13029
13030                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13031                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13032                 if (j < 0)
13033                         goto partno;
13034
13035                 len = pci_vpd_info_field_size(&vpd_data[j]);
13036
13037                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13038                 if (j + len > block_end)
13039                         goto partno;
13040
13041                 memcpy(tp->fw_ver, &vpd_data[j], len);
13042                 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13043         }
13044
13045 partno:
13046         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13047                                       PCI_VPD_RO_KEYWORD_PARTNO);
13048         if (i < 0)
13049                 goto out_not_found;
13050
13051         len = pci_vpd_info_field_size(&vpd_data[i]);
13052
13053         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13054         if (len > TG3_BPN_SIZE ||
13055             (len + i) > TG3_NVM_VPD_LEN)
13056                 goto out_not_found;
13057
13058         memcpy(tp->board_part_number, &vpd_data[i], len);
13059
13060 out_not_found:
13061         kfree(vpd_data);
13062         if (tp->board_part_number[0])
13063                 return;
13064
13065 out_no_vpd:
13066         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13067                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13068                         strcpy(tp->board_part_number, "BCM5717");
13069                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13070                         strcpy(tp->board_part_number, "BCM5718");
13071                 else
13072                         goto nomatch;
13073         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13074                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13075                         strcpy(tp->board_part_number, "BCM57780");
13076                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13077                         strcpy(tp->board_part_number, "BCM57760");
13078                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13079                         strcpy(tp->board_part_number, "BCM57790");
13080                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13081                         strcpy(tp->board_part_number, "BCM57788");
13082                 else
13083                         goto nomatch;
13084         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13085                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13086                         strcpy(tp->board_part_number, "BCM57761");
13087                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13088                         strcpy(tp->board_part_number, "BCM57765");
13089                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13090                         strcpy(tp->board_part_number, "BCM57781");
13091                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13092                         strcpy(tp->board_part_number, "BCM57785");
13093                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13094                         strcpy(tp->board_part_number, "BCM57791");
13095                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13096                         strcpy(tp->board_part_number, "BCM57795");
13097                 else
13098                         goto nomatch;
13099         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13100                 strcpy(tp->board_part_number, "BCM95906");
13101         } else {
13102 nomatch:
13103                 strcpy(tp->board_part_number, "none");
13104         }
13105 }
13106
13107 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13108 {
13109         u32 val;
13110
13111         if (tg3_nvram_read(tp, offset, &val) ||
13112             (val & 0xfc000000) != 0x0c000000 ||
13113             tg3_nvram_read(tp, offset + 4, &val) ||
13114             val != 0)
13115                 return 0;
13116
13117         return 1;
13118 }
13119
/* Read the bootcode version out of NVRAM and append it to tp->fw_ver.
 * Newer images store a 16-byte version string at a pointer found in
 * the image header; older images only provide major/minor numbers.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc holds the bootcode image offset, word 0x4 its load
	 * start address; both are needed to translate the version pointer.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Same header check as tg3_fw_img_is_valid(): marker word
	 * followed by a zero word identifies the new image format.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever is already in fw_ver. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* New format: copy the 16-byte version string pointed to
		 * by header word +8, if there is room left in fw_ver.
		 */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Old format: major/minor packed into a single word. */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13171
13172 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13173 {
13174         u32 val, major, minor;
13175
13176         /* Use native endian representation */
13177         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13178                 return;
13179
13180         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13181                 TG3_NVM_HWSB_CFG1_MAJSFT;
13182         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13183                 TG3_NVM_HWSB_CFG1_MINSFT;
13184
13185         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13186 }
13187
/* Decode the self-boot firmware version from the NVRAM format-1
 * header and append it ("sb vMAJ.MIN" plus an optional build letter)
 * to tp->fw_ver.  @val is the EEPROM magic word already read by the
 * caller; it encodes the self-boot format and revision.
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The edition/header word lives at a revision-specific offset. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just the "sb" tag. */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: minor is two decimal digits; build maps to a
	 * single letter 'a'..'z' below, hence at most 26.
	 */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Non-zero build numbers are expressed as a trailing letter. */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
13242
/* Locate the ASF/management firmware image via the NVRAM directory
 * and append its version string (", <ver>", up to 16 bytes) to
 * tp->fw_ver, truncating at TG3_VER_SIZE.  Returns silently on any
 * read failure or when no ASF directory entry exists.
 */
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for the ASF init image entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed image base address; later chips
	 * store it in the word preceding the directory entry.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	/* offset+4 holds the image location; after validating the image
	 * header, the word at image+8 locates the version string.
	 */
	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): assumes vlen <= TG3_VER_SIZE - 3 here so the
	 * separator fits -- holds for current callers, which write at
	 * most 16 + 2 bytes before this runs; confirm if callers change.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to 16 bytes of version text, truncating at the end of
	 * the buffer (tg3_read_fw_ver() NUL-terminates fw_ver).
	 */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
13294
/* Append the APE (DASH or NCSI) firmware version to tp->fw_ver.
 * Only proceeds when both APE and ASF are enabled and the APE
 * firmware reports itself ready; also latches APE_HAS_NCSI when the
 * firmware advertises the NCSI feature.
 */
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	/* Verify the APE shared-memory segment signature first. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	/* The version word packs major/minor/revision/build fields. */
	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
13330
13331 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13332 {
13333         u32 val;
13334         bool vpd_vers = false;
13335
13336         if (tp->fw_ver[0] != 0)
13337                 vpd_vers = true;
13338
13339         if (tg3_flag(tp, NO_NVRAM)) {
13340                 strcat(tp->fw_ver, "sb");
13341                 return;
13342         }
13343
13344         if (tg3_nvram_read(tp, 0, &val))
13345                 return;
13346
13347         if (val == TG3_EEPROM_MAGIC)
13348                 tg3_read_bc_ver(tp);
13349         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13350                 tg3_read_sb_ver(tp, val);
13351         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13352                 tg3_read_hwsb_ver(tp);
13353         else
13354                 return;
13355
13356         if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13357                 goto done;
13358
13359         tg3_read_mgmtfw_ver(tp);
13360
13361 done:
13362         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13363 }
13364
13365 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13366
13367 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13368 {
13369         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13370                 return TG3_RX_RET_MAX_SIZE_5717;
13371         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13372                 return TG3_RX_RET_MAX_SIZE_5700;
13373         else
13374                 return TG3_RX_RET_MAX_SIZE_5705;
13375 }
13376
/* Host bridges/chipsets (AMD 762, AMD 8131, VIA K8T800) whose host
 * controllers can reorder writes to the NIC's mailbox registers;
 * matched against present PCI devices to decide whether the
 * MBOX_WRITE_REORDER read-back workaround must be enabled.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
13383
13384 static int __devinit tg3_get_invariants(struct tg3 *tp)
13385 {
13386         u32 misc_ctrl_reg;
13387         u32 pci_state_reg, grc_misc_cfg;
13388         u32 val;
13389         u16 pci_cmd;
13390         int err;
13391
13392         /* Force memory write invalidate off.  If we leave it on,
13393          * then on 5700_BX chips we have to enable a workaround.
13394          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13395          * to match the cacheline size.  The Broadcom driver have this
13396          * workaround but turns MWI off all the times so never uses
13397          * it.  This seems to suggest that the workaround is insufficient.
13398          */
13399         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13400         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13401         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13402
13403         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13404          * has the register indirect write enable bit set before
13405          * we try to access any of the MMIO registers.  It is also
13406          * critical that the PCI-X hw workaround situation is decided
13407          * before that as well.
13408          */
13409         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13410                               &misc_ctrl_reg);
13411
13412         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13413                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13414         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13415                 u32 prod_id_asic_rev;
13416
13417                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13418                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13419                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13420                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13421                         pci_read_config_dword(tp->pdev,
13422                                               TG3PCI_GEN2_PRODID_ASICREV,
13423                                               &prod_id_asic_rev);
13424                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13425                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13426                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13427                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13428                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13429                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13430                         pci_read_config_dword(tp->pdev,
13431                                               TG3PCI_GEN15_PRODID_ASICREV,
13432                                               &prod_id_asic_rev);
13433                 else
13434                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13435                                               &prod_id_asic_rev);
13436
13437                 tp->pci_chip_rev_id = prod_id_asic_rev;
13438         }
13439
13440         /* Wrong chip ID in 5752 A0. This code can be removed later
13441          * as A0 is not in production.
13442          */
13443         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13444                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13445
13446         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13447          * we need to disable memory and use config. cycles
13448          * only to access all registers. The 5702/03 chips
13449          * can mistakenly decode the special cycles from the
13450          * ICH chipsets as memory write cycles, causing corruption
13451          * of register and memory space. Only certain ICH bridges
13452          * will drive special cycles with non-zero data during the
13453          * address phase which can fall within the 5703's address
13454          * range. This is not an ICH bug as the PCI spec allows
13455          * non-zero address during special cycles. However, only
13456          * these ICH bridges are known to drive non-zero addresses
13457          * during special cycles.
13458          *
13459          * Since special cycles do not cross PCI bridges, we only
13460          * enable this workaround if the 5703 is on the secondary
13461          * bus of these ICH bridges.
13462          */
13463         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13464             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13465                 static struct tg3_dev_id {
13466                         u32     vendor;
13467                         u32     device;
13468                         u32     rev;
13469                 } ich_chipsets[] = {
13470                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13471                           PCI_ANY_ID },
13472                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13473                           PCI_ANY_ID },
13474                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13475                           0xa },
13476                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13477                           PCI_ANY_ID },
13478                         { },
13479                 };
13480                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13481                 struct pci_dev *bridge = NULL;
13482
13483                 while (pci_id->vendor != 0) {
13484                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13485                                                 bridge);
13486                         if (!bridge) {
13487                                 pci_id++;
13488                                 continue;
13489                         }
13490                         if (pci_id->rev != PCI_ANY_ID) {
13491                                 if (bridge->revision > pci_id->rev)
13492                                         continue;
13493                         }
13494                         if (bridge->subordinate &&
13495                             (bridge->subordinate->number ==
13496                              tp->pdev->bus->number)) {
13497                                 tg3_flag_set(tp, ICH_WORKAROUND);
13498                                 pci_dev_put(bridge);
13499                                 break;
13500                         }
13501                 }
13502         }
13503
13504         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13505                 static struct tg3_dev_id {
13506                         u32     vendor;
13507                         u32     device;
13508                 } bridge_chipsets[] = {
13509                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13510                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13511                         { },
13512                 };
13513                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13514                 struct pci_dev *bridge = NULL;
13515
13516                 while (pci_id->vendor != 0) {
13517                         bridge = pci_get_device(pci_id->vendor,
13518                                                 pci_id->device,
13519                                                 bridge);
13520                         if (!bridge) {
13521                                 pci_id++;
13522                                 continue;
13523                         }
13524                         if (bridge->subordinate &&
13525                             (bridge->subordinate->number <=
13526                              tp->pdev->bus->number) &&
13527                             (bridge->subordinate->subordinate >=
13528                              tp->pdev->bus->number)) {
13529                                 tg3_flag_set(tp, 5701_DMA_BUG);
13530                                 pci_dev_put(bridge);
13531                                 break;
13532                         }
13533                 }
13534         }
13535
13536         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13537          * DMA addresses > 40-bit. This bridge may have other additional
13538          * 57xx devices behind it in some 4-port NIC designs for example.
13539          * Any tg3 device found behind the bridge will also need the 40-bit
13540          * DMA workaround.
13541          */
13542         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13543             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13544                 tg3_flag_set(tp, 5780_CLASS);
13545                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13546                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13547         } else {
13548                 struct pci_dev *bridge = NULL;
13549
13550                 do {
13551                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13552                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13553                                                 bridge);
13554                         if (bridge && bridge->subordinate &&
13555                             (bridge->subordinate->number <=
13556                              tp->pdev->bus->number) &&
13557                             (bridge->subordinate->subordinate >=
13558                              tp->pdev->bus->number)) {
13559                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13560                                 pci_dev_put(bridge);
13561                                 break;
13562                         }
13563                 } while (bridge);
13564         }
13565
13566         /* Initialize misc host control in PCI block. */
13567         tp->misc_host_ctrl |= (misc_ctrl_reg &
13568                                MISC_HOST_CTRL_CHIPREV);
13569         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13570                                tp->misc_host_ctrl);
13571
13572         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13573             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13574             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13575             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13576                 tp->pdev_peer = tg3_find_peer(tp);
13577
13578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13579             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13580             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13581                 tg3_flag_set(tp, 5717_PLUS);
13582
13583         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13584             tg3_flag(tp, 5717_PLUS))
13585                 tg3_flag_set(tp, 57765_PLUS);
13586
13587         /* Intentionally exclude ASIC_REV_5906 */
13588         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13589             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13590             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13591             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13592             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13594             tg3_flag(tp, 57765_PLUS))
13595                 tg3_flag_set(tp, 5755_PLUS);
13596
13597         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13598             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13599             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13600             tg3_flag(tp, 5755_PLUS) ||
13601             tg3_flag(tp, 5780_CLASS))
13602                 tg3_flag_set(tp, 5750_PLUS);
13603
13604         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13605             tg3_flag(tp, 5750_PLUS))
13606                 tg3_flag_set(tp, 5705_PLUS);
13607
13608         /* Determine TSO capabilities */
13609         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13610                 ; /* Do nothing. HW bug. */
13611         else if (tg3_flag(tp, 57765_PLUS))
13612                 tg3_flag_set(tp, HW_TSO_3);
13613         else if (tg3_flag(tp, 5755_PLUS) ||
13614                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13615                 tg3_flag_set(tp, HW_TSO_2);
13616         else if (tg3_flag(tp, 5750_PLUS)) {
13617                 tg3_flag_set(tp, HW_TSO_1);
13618                 tg3_flag_set(tp, TSO_BUG);
13619                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13620                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13621                         tg3_flag_clear(tp, TSO_BUG);
13622         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13623                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13624                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13625                         tg3_flag_set(tp, TSO_BUG);
13626                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13627                         tp->fw_needed = FIRMWARE_TG3TSO5;
13628                 else
13629                         tp->fw_needed = FIRMWARE_TG3TSO;
13630         }
13631
13632         /* Selectively allow TSO based on operating conditions */
13633         if (tg3_flag(tp, HW_TSO_1) ||
13634             tg3_flag(tp, HW_TSO_2) ||
13635             tg3_flag(tp, HW_TSO_3) ||
13636             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13637                 tg3_flag_set(tp, TSO_CAPABLE);
13638         else {
13639                 tg3_flag_clear(tp, TSO_CAPABLE);
13640                 tg3_flag_clear(tp, TSO_BUG);
13641                 tp->fw_needed = NULL;
13642         }
13643
13644         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13645                 tp->fw_needed = FIRMWARE_TG3;
13646
13647         tp->irq_max = 1;
13648
13649         if (tg3_flag(tp, 5750_PLUS)) {
13650                 tg3_flag_set(tp, SUPPORT_MSI);
13651                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13652                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13653                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13654                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13655                      tp->pdev_peer == tp->pdev))
13656                         tg3_flag_clear(tp, SUPPORT_MSI);
13657
13658                 if (tg3_flag(tp, 5755_PLUS) ||
13659                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13660                         tg3_flag_set(tp, 1SHOT_MSI);
13661                 }
13662
13663                 if (tg3_flag(tp, 57765_PLUS)) {
13664                         tg3_flag_set(tp, SUPPORT_MSIX);
13665                         tp->irq_max = TG3_IRQ_MAX_VECS;
13666                 }
13667         }
13668
13669         /* All chips can get confused if TX buffers
13670          * straddle the 4GB address boundary.
13671          */
13672         tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13673
13674         if (tg3_flag(tp, 5755_PLUS))
13675                 tg3_flag_set(tp, SHORT_DMA_BUG);
13676         else
13677                 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13678
13679         if (tg3_flag(tp, 5717_PLUS))
13680                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13681
13682         if (tg3_flag(tp, 57765_PLUS) &&
13683             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13684                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13685
13686         if (!tg3_flag(tp, 5705_PLUS) ||
13687             tg3_flag(tp, 5780_CLASS) ||
13688             tg3_flag(tp, USE_JUMBO_BDFLAG))
13689                 tg3_flag_set(tp, JUMBO_CAPABLE);
13690
13691         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13692                               &pci_state_reg);
13693
13694         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13695         if (tp->pcie_cap != 0) {
13696                 u16 lnkctl;
13697
13698                 tg3_flag_set(tp, PCI_EXPRESS);
13699
13700                 tp->pcie_readrq = 4096;
13701                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13702                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13703                         tp->pcie_readrq = 2048;
13704
13705                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13706
13707                 pci_read_config_word(tp->pdev,
13708                                      tp->pcie_cap + PCI_EXP_LNKCTL,
13709                                      &lnkctl);
13710                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13711                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13712                             ASIC_REV_5906) {
13713                                 tg3_flag_clear(tp, HW_TSO_2);
13714                                 tg3_flag_clear(tp, TSO_CAPABLE);
13715                         }
13716                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13717                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13718                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13719                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13720                                 tg3_flag_set(tp, CLKREQ_BUG);
13721                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13722                         tg3_flag_set(tp, L1PLLPD_EN);
13723                 }
13724         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13725                 tg3_flag_set(tp, PCI_EXPRESS);
13726         } else if (!tg3_flag(tp, 5705_PLUS) ||
13727                    tg3_flag(tp, 5780_CLASS)) {
13728                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13729                 if (!tp->pcix_cap) {
13730                         dev_err(&tp->pdev->dev,
13731                                 "Cannot find PCI-X capability, aborting\n");
13732                         return -EIO;
13733                 }
13734
13735                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13736                         tg3_flag_set(tp, PCIX_MODE);
13737         }
13738
13739         /* If we have an AMD 762 or VIA K8T800 chipset, write
13740          * reordering to the mailbox registers done by the host
13741          * controller can cause major troubles.  We read back from
13742          * every mailbox register write to force the writes to be
13743          * posted to the chip in order.
13744          */
13745         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13746             !tg3_flag(tp, PCI_EXPRESS))
13747                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13748
13749         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13750                              &tp->pci_cacheline_sz);
13751         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13752                              &tp->pci_lat_timer);
13753         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13754             tp->pci_lat_timer < 64) {
13755                 tp->pci_lat_timer = 64;
13756                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13757                                       tp->pci_lat_timer);
13758         }
13759
13760         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13761                 /* 5700 BX chips need to have their TX producer index
13762                  * mailboxes written twice to workaround a bug.
13763                  */
13764                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13765
13766                 /* If we are in PCI-X mode, enable register write workaround.
13767                  *
13768                  * The workaround is to use indirect register accesses
13769                  * for all chip writes not to mailbox registers.
13770                  */
13771                 if (tg3_flag(tp, PCIX_MODE)) {
13772                         u32 pm_reg;
13773
13774                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13775
13776                         /* The chip can have it's power management PCI config
13777                          * space registers clobbered due to this bug.
13778                          * So explicitly force the chip into D0 here.
13779                          */
13780                         pci_read_config_dword(tp->pdev,
13781                                               tp->pm_cap + PCI_PM_CTRL,
13782                                               &pm_reg);
13783                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13784                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13785                         pci_write_config_dword(tp->pdev,
13786                                                tp->pm_cap + PCI_PM_CTRL,
13787                                                pm_reg);
13788
13789                         /* Also, force SERR#/PERR# in PCI command. */
13790                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13791                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13792                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13793                 }
13794         }
13795
13796         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13797                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13798         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13799                 tg3_flag_set(tp, PCI_32BIT);
13800
13801         /* Chip-specific fixup from Broadcom driver */
13802         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13803             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13804                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13805                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13806         }
13807
13808         /* Default fast path register access methods */
13809         tp->read32 = tg3_read32;
13810         tp->write32 = tg3_write32;
13811         tp->read32_mbox = tg3_read32;
13812         tp->write32_mbox = tg3_write32;
13813         tp->write32_tx_mbox = tg3_write32;
13814         tp->write32_rx_mbox = tg3_write32;
13815
13816         /* Various workaround register access methods */
13817         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13818                 tp->write32 = tg3_write_indirect_reg32;
13819         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13820                  (tg3_flag(tp, PCI_EXPRESS) &&
13821                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13822                 /*
13823                  * Back to back register writes can cause problems on these
13824                  * chips, the workaround is to read back all reg writes
13825                  * except those to mailbox regs.
13826                  *
13827                  * See tg3_write_indirect_reg32().
13828                  */
13829                 tp->write32 = tg3_write_flush_reg32;
13830         }
13831
13832         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13833                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13834                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13835                         tp->write32_rx_mbox = tg3_write_flush_reg32;
13836         }
13837
13838         if (tg3_flag(tp, ICH_WORKAROUND)) {
13839                 tp->read32 = tg3_read_indirect_reg32;
13840                 tp->write32 = tg3_write_indirect_reg32;
13841                 tp->read32_mbox = tg3_read_indirect_mbox;
13842                 tp->write32_mbox = tg3_write_indirect_mbox;
13843                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13844                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13845
13846                 iounmap(tp->regs);
13847                 tp->regs = NULL;
13848
13849                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13850                 pci_cmd &= ~PCI_COMMAND_MEMORY;
13851                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13852         }
13853         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13854                 tp->read32_mbox = tg3_read32_mbox_5906;
13855                 tp->write32_mbox = tg3_write32_mbox_5906;
13856                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13857                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13858         }
13859
13860         if (tp->write32 == tg3_write_indirect_reg32 ||
13861             (tg3_flag(tp, PCIX_MODE) &&
13862              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13863               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13864                 tg3_flag_set(tp, SRAM_USE_CONFIG);
13865
13866         /* Get eeprom hw config before calling tg3_set_power_state().
13867          * In particular, the TG3_FLAG_IS_NIC flag must be
13868          * determined before calling tg3_set_power_state() so that
13869          * we know whether or not to switch out of Vaux power.
13870          * When the flag is set, it means that GPIO1 is used for eeprom
13871          * write protect and also implies that it is a LOM where GPIOs
13872          * are not used to switch power.
13873          */
13874         tg3_get_eeprom_hw_cfg(tp);
13875
13876         if (tg3_flag(tp, ENABLE_APE)) {
13877                 /* Allow reads and writes to the
13878                  * APE register and memory space.
13879                  */
13880                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13881                                  PCISTATE_ALLOW_APE_SHMEM_WR |
13882                                  PCISTATE_ALLOW_APE_PSPACE_WR;
13883                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13884                                        pci_state_reg);
13885         }
13886
13887         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13888             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13889             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13890             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13891             tg3_flag(tp, 57765_PLUS))
13892                 tg3_flag_set(tp, CPMU_PRESENT);
13893
13894         /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13895          * GPIO1 driven high will bring 5700's external PHY out of reset.
13896          * It is also used as eeprom write protect on LOMs.
13897          */
13898         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13899         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13900             tg3_flag(tp, EEPROM_WRITE_PROT))
13901                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13902                                        GRC_LCLCTRL_GPIO_OUTPUT1);
13903         /* Unused GPIO3 must be driven as output on 5752 because there
13904          * are no pull-up resistors on unused GPIO pins.
13905          */
13906         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13907                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13908
13909         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13910             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13911             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13912                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13913
13914         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13915             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13916                 /* Turn off the debug UART. */
13917                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13918                 if (tg3_flag(tp, IS_NIC))
13919                         /* Keep VMain power. */
13920                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13921                                               GRC_LCLCTRL_GPIO_OUTPUT0;
13922         }
13923
13924         /* Force the chip into D0. */
13925         err = tg3_power_up(tp);
13926         if (err) {
13927                 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13928                 return err;
13929         }
13930
13931         /* Derive initial jumbo mode from MTU assigned in
13932          * ether_setup() via the alloc_etherdev() call
13933          */
13934         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13935                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13936
13937         /* Determine WakeOnLan speed to use. */
13938         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13939             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13940             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13941             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13942                 tg3_flag_clear(tp, WOL_SPEED_100MB);
13943         } else {
13944                 tg3_flag_set(tp, WOL_SPEED_100MB);
13945         }
13946
13947         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13948                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13949
13950         /* A few boards don't want Ethernet@WireSpeed phy feature */
13951         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13952             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13953              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13954              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13955             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13956             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13957                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13958
13959         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13960             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13961                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13962         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13963                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13964
13965         if (tg3_flag(tp, 5705_PLUS) &&
13966             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13967             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13968             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13969             !tg3_flag(tp, 57765_PLUS)) {
13970                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13971                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13972                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13973                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13974                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13975                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13976                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13977                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13978                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13979                 } else
13980                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13981         }
13982
13983         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13984             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13985                 tp->phy_otp = tg3_read_otp_phycfg(tp);
13986                 if (tp->phy_otp == 0)
13987                         tp->phy_otp = TG3_OTP_DEFAULT;
13988         }
13989
13990         if (tg3_flag(tp, CPMU_PRESENT))
13991                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13992         else
13993                 tp->mi_mode = MAC_MI_MODE_BASE;
13994
13995         tp->coalesce_mode = 0;
13996         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13997             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13998                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13999
14000         /* Set these bits to enable statistics workaround. */
14001         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14002             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14003             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14004                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14005                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14006         }
14007
14008         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14009             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14010                 tg3_flag_set(tp, USE_PHYLIB);
14011
14012         err = tg3_mdio_init(tp);
14013         if (err)
14014                 return err;
14015
14016         /* Initialize data/descriptor byte/word swapping. */
14017         val = tr32(GRC_MODE);
14018         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14019                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14020                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14021                         GRC_MODE_B2HRX_ENABLE |
14022                         GRC_MODE_HTX2B_ENABLE |
14023                         GRC_MODE_HOST_STACKUP);
14024         else
14025                 val &= GRC_MODE_HOST_STACKUP;
14026
14027         tw32(GRC_MODE, val | tp->grc_mode);
14028
14029         tg3_switch_clocks(tp);
14030
14031         /* Clear this out for sanity. */
14032         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14033
14034         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14035                               &pci_state_reg);
14036         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14037             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14038                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14039
14040                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14041                     chiprevid == CHIPREV_ID_5701_B0 ||
14042                     chiprevid == CHIPREV_ID_5701_B2 ||
14043                     chiprevid == CHIPREV_ID_5701_B5) {
14044                         void __iomem *sram_base;
14045
14046                         /* Write some dummy words into the SRAM status block
14047                          * area, see if it reads back correctly.  If the return
14048                          * value is bad, force enable the PCIX workaround.
14049                          */
14050                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14051
14052                         writel(0x00000000, sram_base);
14053                         writel(0x00000000, sram_base + 4);
14054                         writel(0xffffffff, sram_base + 4);
14055                         if (readl(sram_base) != 0x00000000)
14056                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14057                 }
14058         }
14059
14060         udelay(50);
14061         tg3_nvram_init(tp);
14062
14063         grc_misc_cfg = tr32(GRC_MISC_CFG);
14064         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14065
14066         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14067             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14068              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14069                 tg3_flag_set(tp, IS_5788);
14070
14071         if (!tg3_flag(tp, IS_5788) &&
14072             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14073                 tg3_flag_set(tp, TAGGED_STATUS);
14074         if (tg3_flag(tp, TAGGED_STATUS)) {
14075                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14076                                       HOSTCC_MODE_CLRTICK_TXBD);
14077
14078                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14079                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14080                                        tp->misc_host_ctrl);
14081         }
14082
14083         /* Preserve the APE MAC_MODE bits */
14084         if (tg3_flag(tp, ENABLE_APE))
14085                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14086         else
14087                 tp->mac_mode = TG3_DEF_MAC_MODE;
14088
14089         /* these are limited to 10/100 only */
14090         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14091              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14092             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14093              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14094              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14095               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14096               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14097             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14098              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14099               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14100               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14101             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14102             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14103             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14104             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14105                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14106
14107         err = tg3_phy_probe(tp);
14108         if (err) {
14109                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14110                 /* ... but do not return immediately ... */
14111                 tg3_mdio_fini(tp);
14112         }
14113
14114         tg3_read_vpd(tp);
14115         tg3_read_fw_ver(tp);
14116
14117         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14118                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14119         } else {
14120                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14121                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14122                 else
14123                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14124         }
14125
14126         /* 5700 {AX,BX} chips have a broken status block link
14127          * change bit implementation, so we must use the
14128          * status register in those cases.
14129          */
14130         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14131                 tg3_flag_set(tp, USE_LINKCHG_REG);
14132         else
14133                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14134
14135         /* The led_ctrl is set during tg3_phy_probe, here we might
14136          * have to force the link status polling mechanism based
14137          * upon subsystem IDs.
14138          */
14139         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14140             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14141             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14142                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14143                 tg3_flag_set(tp, USE_LINKCHG_REG);
14144         }
14145
14146         /* For all SERDES we poll the MAC status register. */
14147         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14148                 tg3_flag_set(tp, POLL_SERDES);
14149         else
14150                 tg3_flag_clear(tp, POLL_SERDES);
14151
14152         tp->rx_offset = NET_IP_ALIGN;
14153         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14154         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14155             tg3_flag(tp, PCIX_MODE)) {
14156                 tp->rx_offset = 0;
14157 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14158                 tp->rx_copy_thresh = ~(u16)0;
14159 #endif
14160         }
14161
14162         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14163         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14164         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14165
14166         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14167
14168         /* Increment the rx prod index on the rx std ring by at most
14169          * 8 for these chips to workaround hw errata.
14170          */
14171         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14172             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14173             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14174                 tp->rx_std_max_post = 8;
14175
14176         if (tg3_flag(tp, ASPM_WORKAROUND))
14177                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14178                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14179
14180         return err;
14181 }
14182
14183 #ifdef CONFIG_SPARC
14184 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14185 {
14186         struct net_device *dev = tp->dev;
14187         struct pci_dev *pdev = tp->pdev;
14188         struct device_node *dp = pci_device_to_OF_node(pdev);
14189         const unsigned char *addr;
14190         int len;
14191
14192         addr = of_get_property(dp, "local-mac-address", &len);
14193         if (addr && len == 6) {
14194                 memcpy(dev->dev_addr, addr, 6);
14195                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14196                 return 0;
14197         }
14198         return -ENODEV;
14199 }
14200
14201 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14202 {
14203         struct net_device *dev = tp->dev;
14204
14205         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14206         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14207         return 0;
14208 }
14209 #endif
14210
/* Determine the device MAC address, trying sources in decreasing order
 * of preference:
 *   1. (sparc only) the OpenFirmware "local-mac-address" property
 *   2. the bootcode MAC address mailbox in NIC SRAM
 *   3. NVRAM, at a chip/port-specific offset
 *   4. the MAC_ADDR_0 hardware registers
 *   5. (sparc only) the system IDPROM
 * Returns 0 on success, -EINVAL if no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Select the NVRAM offset holding the MAC address for this
	 * chip/function.  0x7c is the common default.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts: the second MAC's address lives at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): if the NVRAM arbitration lock cannot be
		 * acquired the NVRAM state machine is reset rather than
		 * unlocked -- presumably to recover a wedged arbiter;
		 * confirm against Broadcom documentation.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* 5717+ parts select the offset by PCI function number. */
		if (PCI_FUNC(tp->pdev->devfn) & 1)
			mac_offset = 0xcc;
		if (PCI_FUNC(tp->pdev->devfn) > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK"; the mailbox contents are only trusted
	 * when this signature is present in the high half.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* The two meaningful bytes of the big-endian 'hi'
			 * word are its low-order half.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
14286
/* Goal codes for tg3_calc_dma_bndry(): whether DMA bursts should be
 * confined to a single host cache line or may span multiple lines.
 */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2

/* Fold the appropriate DMA read/write boundary bits into @val (an image
 * of the DMA_RWCTRL register), based on the host cache line size read
 * from PCI config space and the bus type (plain PCI / PCI-X / PCIe).
 * Returns the updated register value.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;	/* unset by firmware: assume worst case */
	else
		cacheline_size = (int) byte * 4;	/* register counts dwords */

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Pick a per-architecture goal; 0 means no boundary constraint. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	/* 57765+ parts only expose a cache-alignment disable bit. */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe: only write-side boundary control exists. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Plain PCI: pick the boundary matching the cache line
		 * size (single-cacheline goal) or the largest setting.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
14430
/* Run one host<->device DMA transfer of @size bytes against the DMA-able
 * buffer at @buf_dma (@to_device selects the direction: nonzero = host
 * memory -> NIC read DMA, zero = NIC write DMA -> host memory).
 *
 * A buffer descriptor is built by hand, written into NIC SRAM through
 * the PCI config-space memory window, and queued on the corresponding
 * FTQ; the completion FIFO is then polled.  Returns 0 when the
 * descriptor shows up on the completion FIFO, -ENODEV on timeout
 * (~4 ms of polling).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA engines and clear the completion FIFOs
	 * before queueing the test descriptor.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	/* NOTE(review): 0x00002100 is an NIC-internal mbuf address used
	 * as the on-chip side of the transfer -- confirm against the
	 * chip's SRAM map.
	 */
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* NOTE(review): completion/source queue IDs for the read
		 * DMA path -- values per Broadcom internals; verify.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM via the
	 * PCI config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the transfer by enqueueing the descriptor's SRAM address
	 * on the direction-appropriate DMA FTQ.
	 */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO for our descriptor (40 x 100us). */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
14510
/* Size (bytes) of the coherent scratch buffer used by the DMA self-test. */
#define TEST_BUFFER_SIZE        0x2000

/* Host bridges known to need a DMA wait-state workaround (presumably
 * consulted by the DMA test code below -- NOTE(review): usage is outside
 * this hunk; verify).
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
14517
/* Tune TG3PCI_DMA_RW_CTRL (watermarks, boundaries, workaround bits) for the
 * detected bus type, then — on 5700/5701 only — run a host<->chip DMA
 * loopback over a coherent buffer to detect the write-DMA boundary bug and
 * pick a safe write boundary.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command watermarks. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* 57765+ parts need no further tuning and no loopback test. */
	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		/* Conventional PCI: per-ASIC watermark values. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X: 5703/5704 need clock-dependent workarounds. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* On 5703/5704 the low nibble must stay clear. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 have the write-DMA bug; skip the loopback test
	 * everywhere else.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write pattern to chip, read back, verify.  On corruption,
	 * tighten the write boundary to 16 bytes and retry once.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: apply the 16-byte
				 * boundary workaround and rerun the test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
14707
14708 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14709 {
14710         if (tg3_flag(tp, 57765_PLUS)) {
14711                 tp->bufmgr_config.mbuf_read_dma_low_water =
14712                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14713                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14714                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14715                 tp->bufmgr_config.mbuf_high_water =
14716                         DEFAULT_MB_HIGH_WATER_57765;
14717
14718                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14719                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14720                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14721                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14722                 tp->bufmgr_config.mbuf_high_water_jumbo =
14723                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14724         } else if (tg3_flag(tp, 5705_PLUS)) {
14725                 tp->bufmgr_config.mbuf_read_dma_low_water =
14726                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14727                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14728                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14729                 tp->bufmgr_config.mbuf_high_water =
14730                         DEFAULT_MB_HIGH_WATER_5705;
14731                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14732                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14733                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14734                         tp->bufmgr_config.mbuf_high_water =
14735                                 DEFAULT_MB_HIGH_WATER_5906;
14736                 }
14737
14738                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14739                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14740                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14741                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14742                 tp->bufmgr_config.mbuf_high_water_jumbo =
14743                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14744         } else {
14745                 tp->bufmgr_config.mbuf_read_dma_low_water =
14746                         DEFAULT_MB_RDMA_LOW_WATER;
14747                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14748                         DEFAULT_MB_MACRX_LOW_WATER;
14749                 tp->bufmgr_config.mbuf_high_water =
14750                         DEFAULT_MB_HIGH_WATER;
14751
14752                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14753                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14754                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14755                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14756                 tp->bufmgr_config.mbuf_high_water_jumbo =
14757                         DEFAULT_MB_HIGH_WATER_JUMBO;
14758         }
14759
14760         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14761         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14762 }
14763
14764 static char * __devinit tg3_phy_string(struct tg3 *tp)
14765 {
14766         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14767         case TG3_PHY_ID_BCM5400:        return "5400";
14768         case TG3_PHY_ID_BCM5401:        return "5401";
14769         case TG3_PHY_ID_BCM5411:        return "5411";
14770         case TG3_PHY_ID_BCM5701:        return "5701";
14771         case TG3_PHY_ID_BCM5703:        return "5703";
14772         case TG3_PHY_ID_BCM5704:        return "5704";
14773         case TG3_PHY_ID_BCM5705:        return "5705";
14774         case TG3_PHY_ID_BCM5750:        return "5750";
14775         case TG3_PHY_ID_BCM5752:        return "5752";
14776         case TG3_PHY_ID_BCM5714:        return "5714";
14777         case TG3_PHY_ID_BCM5780:        return "5780";
14778         case TG3_PHY_ID_BCM5755:        return "5755";
14779         case TG3_PHY_ID_BCM5787:        return "5787";
14780         case TG3_PHY_ID_BCM5784:        return "5784";
14781         case TG3_PHY_ID_BCM5756:        return "5722/5756";
14782         case TG3_PHY_ID_BCM5906:        return "5906";
14783         case TG3_PHY_ID_BCM5761:        return "5761";
14784         case TG3_PHY_ID_BCM5718C:       return "5718C";
14785         case TG3_PHY_ID_BCM5718S:       return "5718S";
14786         case TG3_PHY_ID_BCM57765:       return "57765";
14787         case TG3_PHY_ID_BCM5719C:       return "5719C";
14788         case TG3_PHY_ID_BCM5720C:       return "5720C";
14789         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
14790         case 0:                 return "serdes";
14791         default:                return "unknown";
14792         }
14793 }
14794
14795 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14796 {
14797         if (tg3_flag(tp, PCI_EXPRESS)) {
14798                 strcpy(str, "PCI Express");
14799                 return str;
14800         } else if (tg3_flag(tp, PCIX_MODE)) {
14801                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14802
14803                 strcpy(str, "PCIX:");
14804
14805                 if ((clock_ctrl == 7) ||
14806                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14807                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14808                         strcat(str, "133MHz");
14809                 else if (clock_ctrl == 0)
14810                         strcat(str, "33MHz");
14811                 else if (clock_ctrl == 2)
14812                         strcat(str, "50MHz");
14813                 else if (clock_ctrl == 4)
14814                         strcat(str, "66MHz");
14815                 else if (clock_ctrl == 6)
14816                         strcat(str, "100MHz");
14817         } else {
14818                 strcpy(str, "PCI:");
14819                 if (tg3_flag(tp, PCI_HIGH_SPEED))
14820                         strcat(str, "66MHz");
14821                 else
14822                         strcat(str, "33MHz");
14823         }
14824         if (tg3_flag(tp, PCI_32BIT))
14825                 strcat(str, ":32-bit");
14826         else
14827                 strcat(str, ":64-bit");
14828         return str;
14829 }
14830
/* Find the sibling PCI function of a dual-port device (e.g. 5704).
 * Returns tp->pdev itself when no peer exists (single-port config).
 * The returned pointer is intentionally NOT reference-counted; see the
 * comment near the end.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* devnr = our slot with the function bits masked off. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Probe all eight functions of the slot for a device other than
	 * ourselves.  pci_get_slot() takes a reference; it is dropped for
	 * every non-match (pci_dev_put(NULL) is a no-op).
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
14858
14859 static void __devinit tg3_init_coal(struct tg3 *tp)
14860 {
14861         struct ethtool_coalesce *ec = &tp->coal;
14862
14863         memset(ec, 0, sizeof(*ec));
14864         ec->cmd = ETHTOOL_GCOALESCE;
14865         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14866         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14867         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14868         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14869         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14870         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14871         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14872         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14873         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14874
14875         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14876                                  HOSTCC_MODE_CLRTICK_TXBD)) {
14877                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14878                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14879                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14880                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14881         }
14882
14883         if (tg3_flag(tp, 5705_PLUS)) {
14884                 ec->rx_coalesce_usecs_irq = 0;
14885                 ec->tx_coalesce_usecs_irq = 0;
14886                 ec->stats_block_coalesce_usecs = 0;
14887         }
14888 }
14889
/* net_device callback table wired into the netdev in tg3_init_one(). */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open               = tg3_open,
	.ndo_stop               = tg3_close,
	.ndo_start_xmit         = tg3_start_xmit,
	.ndo_get_stats64        = tg3_get_stats64,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_multicast_list = tg3_set_rx_mode,
	.ndo_set_mac_address    = tg3_set_mac_addr,
	.ndo_do_ioctl           = tg3_ioctl,
	.ndo_tx_timeout         = tg3_tx_timeout,
	.ndo_change_mtu         = tg3_change_mtu,
	.ndo_fix_features       = tg3_fix_features,
	.ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = tg3_poll_controller,
#endif
};
14907
/* PCI probe routine: bring up one tg3 device.  Enables the PCI device,
 * maps its BARs, discovers chip capabilities, configures DMA masks and
 * netdev features, runs the DMA self-test, and registers the net_device.
 * Returns 0 on success or a negative errno; all resources acquired before
 * a failure are released via the goto-unwind labels at the bottom.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 features = 0;

	printk_once(KERN_INFO "%s\n", version);

	/* Phase 1: PCI-level bring-up. */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* Phase 2: allocate the netdev and initialize driver state. */
	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Phase 3: map the register BAR. */
	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	/* Phase 4: discover chip revision, flags, and quirks. */
	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Phase 5: assemble the netdev feature set. */
	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	/* Slow-bus 5705 A1 without TSO must cap the RX ring at 64. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}

	/* Phase 6: map the APE register BAR when present. */
	if (tg3_flag(tp, ENABLE_APE)) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Phase 7: assign interrupt/producer/consumer mailboxes to each
	 * NAPI context.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	/* Phase 8: register the netdev and report configuration. */
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

	/* Error unwinding: each label releases everything acquired after
	 * the next label below it.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
15268
15269 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15270 {
15271         struct net_device *dev = pci_get_drvdata(pdev);
15272
15273         if (dev) {
15274                 struct tg3 *tp = netdev_priv(dev);
15275
15276                 if (tp->fw)
15277                         release_firmware(tp->fw);
15278
15279                 cancel_work_sync(&tp->reset_task);
15280
15281                 if (!tg3_flag(tp, USE_PHYLIB)) {
15282                         tg3_phy_fini(tp);
15283                         tg3_mdio_fini(tp);
15284                 }
15285
15286                 unregister_netdev(dev);
15287                 if (tp->aperegs) {
15288                         iounmap(tp->aperegs);
15289                         tp->aperegs = NULL;
15290                 }
15291                 if (tp->regs) {
15292                         iounmap(tp->regs);
15293                         tp->regs = NULL;
15294                 }
15295                 free_netdev(dev);
15296                 pci_release_regions(pdev);
15297                 pci_disable_device(pdev);
15298                 pci_set_drvdata(pdev, NULL);
15299         }
15300 }
15301
15302 #ifdef CONFIG_PM_SLEEP
/* dev_pm_ops .suspend handler (system sleep).
 *
 * Quiesces the interface before suspend: flushes the reset task, stops
 * the PHY and the net queues, deletes the periodic timer, masks chip
 * interrupts, halts the chip, and then calls tg3_power_down_prepare().
 * If that last step fails, the teardown is undone (hardware restarted,
 * timer re-armed, device reattached) so the NIC stays usable, but the
 * original error is still returned to the PM core.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface was never brought up. */
	if (!netif_running(dev))
		return 0;

	/* Wait for any in-flight reset task before tearing things down. */
	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	/* Mark the device as absent so the stack stops submitting work. */
	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		/* Power-down preparation failed: bring the hardware back
		 * up so the interface keeps working, then report err.
		 */
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* Only restart the PHY after dropping the full lock, and
		 * only if the hardware restart itself succeeded.
		 */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
15356
15357 static int tg3_resume(struct device *device)
15358 {
15359         struct pci_dev *pdev = to_pci_dev(device);
15360         struct net_device *dev = pci_get_drvdata(pdev);
15361         struct tg3 *tp = netdev_priv(dev);
15362         int err;
15363
15364         if (!netif_running(dev))
15365                 return 0;
15366
15367         netif_device_attach(dev);
15368
15369         tg3_full_lock(tp, 0);
15370
15371         tg3_flag_set(tp, INIT_COMPLETE);
15372         err = tg3_restart_hw(tp, 1);
15373         if (err)
15374                 goto out;
15375
15376         tp->timer.expires = jiffies + tp->timer_offset;
15377         add_timer(&tp->timer);
15378
15379         tg3_netif_start(tp);
15380
15381 out:
15382         tg3_full_unlock(tp);
15383
15384         if (!err)
15385                 tg3_phy_start(tp);
15386
15387         return err;
15388 }
15389
15390 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15391 #define TG3_PM_OPS (&tg3_pm_ops)
15392
15393 #else
15394
15395 #define TG3_PM_OPS NULL
15396
15397 #endif /* CONFIG_PM_SLEEP */
15398
15399 /**
15400  * tg3_io_error_detected - called when PCI error is detected
15401  * @pdev: Pointer to PCI device
15402  * @state: The current pci connection state
15403  *
15404  * This function is called after a PCI bus error affecting
15405  * this device has been detected.
15406  */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	/* Default verdict: ask the PCI core for a slot reset. */
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* rtnl_lock serializes us against open/close/ethtool paths. */
	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	/* NOTE(review): RESTART_TIMER is cleared twice in this function;
	 * this second clear presumably guards against the reset task
	 * having re-set it before cancel_work_sync() completed - confirm
	 * whether the earlier clear is actually needed.
	 */
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	/* Permanent failure: tell the core to disconnect the device.
	 * Otherwise disable it here; tg3_io_slot_reset() re-enables it.
	 */
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
15450
15451 /**
15452  * tg3_io_slot_reset - called after the pci bus has been reset.
15453  * @pdev: Pointer to PCI device
15454  *
15455  * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
15457  * followed by fixups by BIOS, and has its config space
15458  * set up identically to what it was at cold boot.
15459  */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	/* Pessimistic default; upgraded to RECOVERED on success below. */
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	/* Re-enable the device that tg3_io_error_detected() disabled. */
	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	/* Restore config space saved at probe time, then re-save it so a
	 * future error recovery starts from this known-good state.
	 */
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Interface down: nothing further to bring up at this stage. */
	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err) {
		netdev_err(netdev, "Failed to restore register access.\n");
		goto done;
	}

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
15496
15497 /**
15498  * tg3_io_resume - called when traffic can start flowing again.
15499  * @pdev: Pointer to PCI device
15500  *
15501  * This callback is called when the error recovery driver tells
15502  * us that its OK to resume normal operation.
15503  */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	/* Re-initialize the hardware under the full lock. */
	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		/* Void callback: the failure can only be logged here. */
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	/* Undo the detach done in tg3_io_error_detected(). */
	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	/* Called last and outside the full lock, as elsewhere in this file. */
	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
15536
/* PCI AER recovery callbacks: error_detected quiesces the device,
 * slot_reset re-enables it after the bus reset, resume restarts traffic.
 */
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
15542
/* PCI driver glue: probe/remove, error handlers, and (when CONFIG_PM_SLEEP
 * is set) the suspend/resume ops via TG3_PM_OPS.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
15551
/* Module entry point: register the PCI driver with the core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
15556
/* Module exit point: unregister the driver, detaching all devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);