/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
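
/* Example: tg3_flag(tp, TAGGED_STATUS) tests TG3_FLAG_TAGGED_STATUS in
 * tp->tg3_flags; tg3_flag_set() and tg3_flag_clear() update the same bit
 * through the generic bitops wrapped above.
 */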

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     119
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 18, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place these n-ring-entries values into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
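
/* NEXT_TX is an instance of the mask-instead-of-modulo idiom described
 * above: TG3_TX_RING_SIZE is a power of two, so
 * ((N) + 1) & (TG3_TX_RING_SIZE - 1) == ((N) + 1) % TG3_TX_RING_SIZE.
 */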

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For architectures
 * where unaligned memory accesses incur little penalty, we can
 * reintegrate the 5701 into the normal rx path.  Doing so saves a device
 * structure dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

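/* Mailboxes can also be reached through PCI config space when the register
 * BAR cannot be used directly.  The 0x5600 added below appears to be the
 * offset of the mailbox region within register space; it matches the
 * GRCMBOX_BASE offset used by the 5906 accessors further down.
 */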
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

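/* Naming convention for the accessor macros above: tw32()/tr32() are plain
 * 32-bit register writes/reads, the _f variants flush the posted write by
 * reading the register back, and tw32_wait_f() additionally waits the given
 * number of usecs (see _tw32_flush() above).
 *
 * NIC-local SRAM (below) is accessed through a memory window: point
 * TG3PCI_MEM_WIN_BASE_ADDR at the desired offset, then transfer the data
 * through TG3PCI_MEM_WIN_DATA.  The window base is restored to zero
 * afterwards so no stale mapping is left behind.
 */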
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}
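
/* Note on tg3_ape_lock_init() above: there are 8 APE lock grant registers,
 * 4 bytes apart, and writing APE_LOCK_GRANT_DRIVER to each releases any
 * lock a previous driver instance may still have been holding.
 */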

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  that reenables interrupts.
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
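
/* PHY_BUSY_LOOPS iterations of udelay(10) in the functions below bound a
 * single MDIO transaction to roughly 50 ms before -EBUSY is returned.
 */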

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

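/* Clause 45 PHY registers are reached indirectly through the clause 22
 * MMD access registers: select the MMD device address in MII_TG3_MMD_CTRL,
 * write the register address to MII_TG3_MMD_ADDRESS, switch MII_TG3_MMD_CTRL
 * to the data (no post increment) function, then transfer the data through
 * MII_TG3_MMD_ADDRESS.
 */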
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

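/* Driver-to-firmware events are a handshake on GRC_RX_CPU_EVENT: the
 * driver sets GRC_RX_CPU_DRIVER_EVENT below and the firmware clears it
 * once the event has been consumed, which is what
 * tg3_wait_for_event_ack() polls for.
 */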
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500
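
/* tg3_wait_for_event_ack() polls in ~8 usec steps, so the remaining wait
 * time is converted to a poll count with (usecs >> 3) + 1 below.
 */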

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

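/* The link update message built below hands the firmware raw MII register
 * pairs (BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000 and PHYADDR), one
 * pair packed into each 32-bit word of the command data mailbox.
 */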
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

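/* The two helpers below map the driver's FLOW_CTRL_TX/RX bits onto the
 * pause bits advertised during autonegotiation (cf. IEEE 802.3 Annex 28B):
 * symmetric pause is advertised with the PAUSE_CAP bit alone, TX-only with
 * PAUSE_ASYM alone, and RX-only with both bits set.
 */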
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

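/* Resolve the negotiated pause configuration from the local and remote
 * 1000BASE-X advertisements.  This follows the pause resolution rules of
 * IEEE 802.3 Annex 28B: flow control is enabled in a direction only when
 * the two advertisements agree on it.
 */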
1426 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1427 {
1428         u8 cap = 0;
1429
1430         if (lcladv & ADVERTISE_1000XPAUSE) {
1431                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1432                         if (rmtadv & LPA_1000XPAUSE)
1433                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1434                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1435                                 cap = FLOW_CTRL_RX;
1436                 } else {
1437                         if (rmtadv & LPA_1000XPAUSE)
1438                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1439                 }
1440         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1441                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1442                         cap = FLOW_CTRL_TX;
1443         }
1444
1445         return cap;
1446 }
1447
1448 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1449 {
1450         u8 autoneg;
1451         u8 flowctrl = 0;
1452         u32 old_rx_mode = tp->rx_mode;
1453         u32 old_tx_mode = tp->tx_mode;
1454
1455         if (tg3_flag(tp, USE_PHYLIB))
1456                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1457         else
1458                 autoneg = tp->link_config.autoneg;
1459
1460         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1461                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1462                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1463                 else
1464                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1465         } else
1466                 flowctrl = tp->link_config.flowctrl;
1467
1468         tp->link_config.active_flowctrl = flowctrl;
1469
1470         if (flowctrl & FLOW_CTRL_RX)
1471                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1472         else
1473                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1474
1475         if (old_rx_mode != tp->rx_mode)
1476                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1477
1478         if (flowctrl & FLOW_CTRL_TX)
1479                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1480         else
1481                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1482
1483         if (old_tx_mode != tp->tx_mode)
1484                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1485 }
1486
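/* Link-change callback handed to phylib via phy_connect() in
 * tg3_phy_init().  Under tp->lock it re-syncs MAC_MODE (port mode and
 * duplex), flow control and the MI-status/TX-lengths registers with
 * the new PHY state, then reports the transition if anything
 * user-visible changed.
 */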
1487 static void tg3_adjust_link(struct net_device *dev)
1488 {
1489         u8 oldflowctrl, linkmesg = 0;
1490         u32 mac_mode, lcl_adv, rmt_adv;
1491         struct tg3 *tp = netdev_priv(dev);
1492         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1493
1494         spin_lock_bh(&tp->lock);
1495
1496         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1497                                     MAC_MODE_HALF_DUPLEX);
1498
1499         oldflowctrl = tp->link_config.active_flowctrl;
1500
1501         if (phydev->link) {
1502                 lcl_adv = 0;
1503                 rmt_adv = 0;
1504
1505                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1506                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1507                 else if (phydev->speed == SPEED_1000 ||
1508                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1509                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1510                 else
1511                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1512
1513                 if (phydev->duplex == DUPLEX_HALF)
1514                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1515                 else {
1516                         lcl_adv = tg3_advert_flowctrl_1000T(
1517                                   tp->link_config.flowctrl);
1518
1519                         if (phydev->pause)
1520                                 rmt_adv = LPA_PAUSE_CAP;
1521                         if (phydev->asym_pause)
1522                                 rmt_adv |= LPA_PAUSE_ASYM;
1523                 }
1524
1525                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1526         } else
1527                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1528
1529         if (mac_mode != tp->mac_mode) {
1530                 tp->mac_mode = mac_mode;
1531                 tw32_f(MAC_MODE, tp->mac_mode);
1532                 udelay(40);
1533         }
1534
1535         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1536                 if (phydev->speed == SPEED_10)
1537                         tw32(MAC_MI_STAT,
1538                              MAC_MI_STAT_10MBPS_MODE |
1539                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1540                 else
1541                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1542         }
1543
1544         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1545                 tw32(MAC_TX_LENGTHS,
1546                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1547                       (6 << TX_LENGTHS_IPG_SHIFT) |
1548                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1549         else
1550                 tw32(MAC_TX_LENGTHS,
1551                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1552                       (6 << TX_LENGTHS_IPG_SHIFT) |
1553                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1554
1555         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1556             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1557             phydev->speed != tp->link_config.active_speed ||
1558             phydev->duplex != tp->link_config.active_duplex ||
1559             oldflowctrl != tp->link_config.active_flowctrl)
1560                 linkmesg = 1;
1561
1562         tp->link_config.active_speed = phydev->speed;
1563         tp->link_config.active_duplex = phydev->duplex;
1564
1565         spin_unlock_bh(&tp->lock);
1566
1567         if (linkmesg)
1568                 tg3_link_report(tp);
1569 }
1570
1571 static int tg3_phy_init(struct tg3 *tp)
1572 {
1573         struct phy_device *phydev;
1574
1575         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1576                 return 0;
1577
1578         /* Bring the PHY back to a known state. */
1579         tg3_bmcr_reset(tp);
1580
1581         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1582
1583         /* Attach the MAC to the PHY. */
1584         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1585                              phydev->dev_flags, phydev->interface);
1586         if (IS_ERR(phydev)) {
1587                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1588                 return PTR_ERR(phydev);
1589         }
1590
1591         /* Mask with MAC supported features. */
1592         switch (phydev->interface) {
1593         case PHY_INTERFACE_MODE_GMII:
1594         case PHY_INTERFACE_MODE_RGMII:
1595                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1596                         phydev->supported &= (PHY_GBIT_FEATURES |
1597                                               SUPPORTED_Pause |
1598                                               SUPPORTED_Asym_Pause);
1599                         break;
1600                 }
1601                 /* fallthru */
1602         case PHY_INTERFACE_MODE_MII:
1603                 phydev->supported &= (PHY_BASIC_FEATURES |
1604                                       SUPPORTED_Pause |
1605                                       SUPPORTED_Asym_Pause);
1606                 break;
1607         default:
1608                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1609                 return -EINVAL;
1610         }
1611
1612         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1613
1614         phydev->advertising = phydev->supported;
1615
1616         return 0;
1617 }
1618
1619 static void tg3_phy_start(struct tg3 *tp)
1620 {
1621         struct phy_device *phydev;
1622
1623         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1624                 return;
1625
1626         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1627
1628         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1629                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1630                 phydev->speed = tp->link_config.orig_speed;
1631                 phydev->duplex = tp->link_config.orig_duplex;
1632                 phydev->autoneg = tp->link_config.orig_autoneg;
1633                 phydev->advertising = tp->link_config.orig_advertising;
1634         }
1635
1636         phy_start(phydev);
1637
1638         phy_start_aneg(phydev);
1639 }
1640
1641 static void tg3_phy_stop(struct tg3 *tp)
1642 {
1643         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1644                 return;
1645
1646         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1647 }
1648
1649 static void tg3_phy_fini(struct tg3 *tp)
1650 {
1651         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1652                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1653                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1654         }
1655 }
1656
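/* FET-style PHYs keep some controls behind a shadow page: setting
 * MII_TG3_FET_SHADOW_EN in MII_TG3_FET_TEST exposes the shadow
 * registers for a normal read-modify-write, after which the original
 * MII_TG3_FET_TEST value is restored.
 */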
1657 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1658 {
1659         u32 phytest;
1660
1661         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1662                 u32 phy;
1663
1664                 tg3_writephy(tp, MII_TG3_FET_TEST,
1665                              phytest | MII_TG3_FET_SHADOW_EN);
1666                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1667                         if (enable)
1668                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1669                         else
1670                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1671                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1672                 }
1673                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1674         }
1675 }
1676
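/* APD (auto power-down) lets the PHY power most of itself down while
 * the link is down, waking periodically to look for energy on the
 * wire; the wake interval used here is the one selected by
 * MII_TG3_MISC_SHDW_APD_WKTM_84MS.  Pre-5705 parts, and MII-serdes
 * configurations of the 5717 family, bail out early.
 */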
1677 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1678 {
1679         u32 reg;
1680
1681         if (!tg3_flag(tp, 5705_PLUS) ||
1682             (tg3_flag(tp, 5717_PLUS) &&
1683              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1684                 return;
1685
1686         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1687                 tg3_phy_fet_toggle_apd(tp, enable);
1688                 return;
1689         }
1690
1691         reg = MII_TG3_MISC_SHDW_WREN |
1692               MII_TG3_MISC_SHDW_SCR5_SEL |
1693               MII_TG3_MISC_SHDW_SCR5_LPED |
1694               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1695               MII_TG3_MISC_SHDW_SCR5_SDTL |
1696               MII_TG3_MISC_SHDW_SCR5_C125OE;
1697         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1698                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1699
1700         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1701
1703         reg = MII_TG3_MISC_SHDW_WREN |
1704               MII_TG3_MISC_SHDW_APD_SEL |
1705               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1706         if (enable)
1707                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1708
1709         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1710 }
1711
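/* Enable or disable automatic MDI/MDI-X crossover detection.  FET
 * PHYs hold the control in the shadow MISCCTRL register; other PHYs
 * use the auxiliary-control MISC shadow instead.
 */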
1712 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1713 {
1714         u32 phy;
1715
1716         if (!tg3_flag(tp, 5705_PLUS) ||
1717             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1718                 return;
1719
1720         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1721                 u32 ephy;
1722
1723                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1724                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1725
1726                         tg3_writephy(tp, MII_TG3_FET_TEST,
1727                                      ephy | MII_TG3_FET_SHADOW_EN);
1728                         if (!tg3_readphy(tp, reg, &phy)) {
1729                                 if (enable)
1730                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1731                                 else
1732                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1733                                 tg3_writephy(tp, reg, phy);
1734                         }
1735                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1736                 }
1737         } else {
1738                 int ret;
1739
1740                 ret = tg3_phy_auxctl_read(tp,
1741                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1742                 if (!ret) {
1743                         if (enable)
1744                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1745                         else
1746                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1747                         tg3_phy_auxctl_write(tp,
1748                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1749                 }
1750         }
1751 }
1752
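/* Enable Broadcom's "Ethernet@Wirespeed" feature, which (as the name
 * of MII_TG3_AUXCTL_MISC_WIRESPD_EN suggests) lets the PHY fall back
 * to a lower speed over marginal cabling rather than failing to link
 * entirely.
 */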
1753 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1754 {
1755         int ret;
1756         u32 val;
1757
1758         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1759                 return;
1760
1761         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1762         if (!ret)
1763                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1764                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1765 }
1766
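/* Program PHY DSP coefficients from the factory OTP (one-time
 * programmable) word cached in tp->phy_otp.  Each TG3_OTP_* field is
 * shifted into place and written to its DSP register; SMDSP access
 * must be enabled around the writes via the auxiliary control
 * register.
 */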
1767 static void tg3_phy_apply_otp(struct tg3 *tp)
1768 {
1769         u32 otp, phy;
1770
1771         if (!tp->phy_otp)
1772                 return;
1773
1774         otp = tp->phy_otp;
1775
1776         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1777                 return;
1778
1779         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1780         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1781         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1782
1783         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1784               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1785         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1786
1787         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1788         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1789         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1790
1791         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1792         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1793
1794         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1795         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1796
1797         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1798               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1799         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1800
1801         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1802 }
1803
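/* EEE (Energy Efficient Ethernet, IEEE 802.3az) bookkeeping after a
 * link change.  When a full-duplex 100/1000 link was autonegotiated
 * and the link partner resolved EEE support, setlpicnt is armed as a
 * short countdown (consumed outside this function) before LPI is
 * turned on; otherwise low-power idle is disabled immediately.
 */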
1804 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1805 {
1806         u32 val;
1807
1808         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1809                 return;
1810
1811         tp->setlpicnt = 0;
1812
1813         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1814             current_link_up == 1 &&
1815             tp->link_config.active_duplex == DUPLEX_FULL &&
1816             (tp->link_config.active_speed == SPEED_100 ||
1817              tp->link_config.active_speed == SPEED_1000)) {
1818                 u32 eeectl;
1819
1820                 if (tp->link_config.active_speed == SPEED_1000)
1821                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1822                 else
1823                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1824
1825                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1826
1827                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1828                                   TG3_CL45_D7_EEERES_STAT, &val);
1829
1830                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1831                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1832                         tp->setlpicnt = 2;
1833         }
1834
1835         if (!tp->setlpicnt) {
1836                 val = tr32(TG3_CPMU_EEE_MODE);
1837                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1838         }
1839 }
1840
1841 static void tg3_phy_eee_enable(struct tg3 *tp)
1842 {
1843         u32 val;
1844
1845         if (tp->link_config.active_speed == SPEED_1000 &&
1846             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1847              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1848              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1849             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1850                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1851                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1852         }
1853
1854         val = tr32(TG3_CPMU_EEE_MODE);
1855         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1856 }
1857
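/* Poll the DSP control register until bit 0x1000 (apparently a busy
 * flag for the test macro) clears.  There is no explicit delay; the
 * latency of each MDIO read paces the loop.
 */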
1858 static int tg3_wait_macro_done(struct tg3 *tp)
1859 {
1860         int limit = 100;
1861
1862         while (limit--) {
1863                 u32 tmp32;
1864
1865                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1866                         if ((tmp32 & 0x1000) == 0)
1867                                 break;
1868                 }
1869         }
1870         if (limit < 0)
1871                 return -EBUSY;
1872
1873         return 0;
1874 }
1875
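/* Write a known test pattern into the DSP memory of each of the four
 * channels and read it back.  A stuck macro asks the caller (via
 * *resetp) to reset the PHY and retry; a readback mismatch instead
 * triggers a fixed DSP write sequence before returning -EBUSY.
 */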
1876 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1877 {
1878         static const u32 test_pat[4][6] = {
1879         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1880         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1881         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1882         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1883         };
1884         int chan;
1885
1886         for (chan = 0; chan < 4; chan++) {
1887                 int i;
1888
1889                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1890                              (chan * 0x2000) | 0x0200);
1891                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1892
1893                 for (i = 0; i < 6; i++)
1894                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1895                                      test_pat[chan][i]);
1896
1897                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1898                 if (tg3_wait_macro_done(tp)) {
1899                         *resetp = 1;
1900                         return -EBUSY;
1901                 }
1902
1903                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1904                              (chan * 0x2000) | 0x0200);
1905                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1906                 if (tg3_wait_macro_done(tp)) {
1907                         *resetp = 1;
1908                         return -EBUSY;
1909                 }
1910
1911                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1912                 if (tg3_wait_macro_done(tp)) {
1913                         *resetp = 1;
1914                         return -EBUSY;
1915                 }
1916
1917                 for (i = 0; i < 6; i += 2) {
1918                         u32 low, high;
1919
1920                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1921                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1922                             tg3_wait_macro_done(tp)) {
1923                                 *resetp = 1;
1924                                 return -EBUSY;
1925                         }
1926                         low &= 0x7fff;
1927                         high &= 0x000f;
1928                         if (low != test_pat[chan][i] ||
1929                             high != test_pat[chan][i+1]) {
1930                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1931                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1932                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1933
1934                                 return -EBUSY;
1935                         }
1936                 }
1937         }
1938
1939         return 0;
1940 }
1941
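/* Clear the test-pattern memory of all four DSP channels. */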
1942 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1943 {
1944         int chan;
1945
1946         for (chan = 0; chan < 4; chan++) {
1947                 int i;
1948
1949                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1950                              (chan * 0x2000) | 0x0200);
1951                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1952                 for (i = 0; i < 6; i++)
1953                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1954                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1955                 if (tg3_wait_macro_done(tp))
1956                         return -EBUSY;
1957         }
1958
1959         return 0;
1960 }
1961
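/* PHY reset workaround for the 5703/5704/5705: force a 1000 Mbps
 * full-duplex master configuration, verify the DSP channel memory
 * with test patterns (resetting and retrying up to ten times), then
 * clear the patterns and restore the original MII_CTRL1000 and
 * MII_TG3_EXT_CTRL settings.
 */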
1962 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1963 {
1964         u32 reg32, phy9_orig;
1965         int retries, do_phy_reset, err;
1966
1967         retries = 10;
1968         do_phy_reset = 1;
1969         do {
1970                 if (do_phy_reset) {
1971                         err = tg3_bmcr_reset(tp);
1972                         if (err)
1973                                 return err;
1974                         do_phy_reset = 0;
1975                 }
1976
1977                 /* Disable transmitter and interrupt.  */
1978                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1979                         continue;
1980
1981                 reg32 |= 0x3000;
1982                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1983
1984                 /* Set full-duplex, 1000 Mbps.  */
1985                 tg3_writephy(tp, MII_BMCR,
1986                              BMCR_FULLDPLX | BMCR_SPEED1000);
1987
1988                 /* Set to master mode.  */
1989                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
1990                         continue;
1991
1992                 tg3_writephy(tp, MII_CTRL1000,
1993                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
1994
1995                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1996                 if (err)
1997                         return err;
1998
1999                 /* Block the PHY control access.  */
2000                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2001
2002                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2003                 if (!err)
2004                         break;
2005         } while (--retries);
2006
2007         err = tg3_phy_reset_chanpat(tp);
2008         if (err)
2009                 return err;
2010
2011         tg3_phydsp_write(tp, 0x8005, 0x0000);
2012
2013         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2014         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2015
2016         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2017
2018         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2019
2020         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2021                 reg32 &= ~0x3000;
2022                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2023         } else if (!err)
2024                 err = -EBUSY;
2025
2026         return err;
2027 }
2028
2029 /* This will unconditionally reset the tigon3 PHY and re-apply the
2030  * chip-specific post-reset workarounds.
2031  */
2032 static int tg3_phy_reset(struct tg3 *tp)
2033 {
2034         u32 val, cpmuctrl;
2035         int err;
2036
2037         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2038                 val = tr32(GRC_MISC_CFG);
2039                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2040                 udelay(40);
2041         }
2042         err  = tg3_readphy(tp, MII_BMSR, &val);
2043         err |= tg3_readphy(tp, MII_BMSR, &val);
2044         if (err != 0)
2045                 return -EBUSY;
2046
2047         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2048                 netif_carrier_off(tp->dev);
2049                 tg3_link_report(tp);
2050         }
2051
2052         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2053             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2054             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2055                 err = tg3_phy_reset_5703_4_5(tp);
2056                 if (err)
2057                         return err;
2058                 goto out;
2059         }
2060
2061         cpmuctrl = 0;
2062         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2063             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2064                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2065                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2066                         tw32(TG3_CPMU_CTRL,
2067                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2068         }
2069
2070         err = tg3_bmcr_reset(tp);
2071         if (err)
2072                 return err;
2073
2074         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2075                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2076                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2077
2078                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2079         }
2080
2081         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2082             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2083                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2084                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2085                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2086                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2087                         udelay(40);
2088                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2089                 }
2090         }
2091
2092         if (tg3_flag(tp, 5717_PLUS) &&
2093             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2094                 return 0;
2095
2096         tg3_phy_apply_otp(tp);
2097
2098         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2099                 tg3_phy_toggle_apd(tp, true);
2100         else
2101                 tg3_phy_toggle_apd(tp, false);
2102
2103 out:
2104         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2105             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2106                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2107                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2108                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2109         }
2110
2111         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2112                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2114         }
2115
2116         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2117                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2118                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2119                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2120                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2121                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2122                 }
2123         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2124                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2126                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2127                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2128                                 tg3_writephy(tp, MII_TG3_TEST1,
2129                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2130                         } else
2131                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2132
2133                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2134                 }
2135         }
2136
2137         /* Set the extended packet length bit (bit 14) on all
2138          * chips that support jumbo frames. */
2139         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2140                 /* Cannot do read-modify-write on 5401 */
2141                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2142         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2143                 /* Set bit 14 with read-modify-write to preserve other bits */
2144                 err = tg3_phy_auxctl_read(tp,
2145                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2146                 if (!err)
2147                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2148                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2149         }
2150
2151         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2152          * jumbo frames transmission.
2153          */
2154         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2155                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2156                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2157                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2158         }
2159
2160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2161                 /* adjust output voltage */
2162                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2163         }
2164
2165         tg3_phy_toggle_automdix(tp, 1);
2166         tg3_phy_set_wirespeed(tp);
2167         return 0;
2168 }
2169
2170 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2171 {
2172         if (!tg3_flag(tp, IS_NIC))
2173                 return 0;
2174
2175         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2176                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2177
2178         return 0;
2179 }
2180
2181 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2182 {
2183         u32 grc_local_ctrl;
2184
2185         if (!tg3_flag(tp, IS_NIC) ||
2186             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2187             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2188                 return;
2189
2190         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2191
2192         tw32_wait_f(GRC_LOCAL_CTRL,
2193                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2194                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2195
2196         tw32_wait_f(GRC_LOCAL_CTRL,
2197                     grc_local_ctrl,
2198                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2199
2200         tw32_wait_f(GRC_LOCAL_CTRL,
2201                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2202                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2203 }
2204
2205 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2206 {
2207         if (!tg3_flag(tp, IS_NIC))
2208                 return;
2209
2210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2211             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2212                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2213                             (GRC_LCLCTRL_GPIO_OE0 |
2214                              GRC_LCLCTRL_GPIO_OE1 |
2215                              GRC_LCLCTRL_GPIO_OE2 |
2216                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2217                              GRC_LCLCTRL_GPIO_OUTPUT1),
2218                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2219         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2220                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2221                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2222                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2223                                      GRC_LCLCTRL_GPIO_OE1 |
2224                                      GRC_LCLCTRL_GPIO_OE2 |
2225                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2226                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2227                                      tp->grc_local_ctrl;
2228                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2229                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2230
2231                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2232                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2233                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2234
2235                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2236                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2237                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2238         } else {
2239                 u32 no_gpio2;
2240                 u32 grc_local_ctrl = 0;
2241
2242                 /* Workaround to prevent overdrawing Amps. */
2243                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2244                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2245                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2246                                     grc_local_ctrl,
2247                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2248                 }
2249
2250                 /* On 5753 and variants, GPIO2 cannot be used. */
2251                 no_gpio2 = tp->nic_sram_data_cfg &
2252                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2253
2254                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2255                                   GRC_LCLCTRL_GPIO_OE1 |
2256                                   GRC_LCLCTRL_GPIO_OE2 |
2257                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2258                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2259                 if (no_gpio2) {
2260                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2261                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2262                 }
2263                 tw32_wait_f(GRC_LOCAL_CTRL,
2264                             tp->grc_local_ctrl | grc_local_ctrl,
2265                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2266
2267                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2268
2269                 tw32_wait_f(GRC_LOCAL_CTRL,
2270                             tp->grc_local_ctrl | grc_local_ctrl,
2271                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2272
2273                 if (!no_gpio2) {
2274                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2275                         tw32_wait_f(GRC_LOCAL_CTRL,
2276                                     tp->grc_local_ctrl | grc_local_ctrl,
2277                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2278                 }
2279         }
2280 }
2281
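/* Decide whether the board must keep running from auxiliary power
 * (Vaux) because this port, or its peer on dual-port devices, needs
 * power for WOL or ASF; otherwise the GPIO-driven power switch is
 * returned to Vmain.
 */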
2282 static void tg3_frob_aux_power(struct tg3 *tp)
2283 {
2284         bool need_vaux = false;
2285
2286         /* The GPIOs do something completely different on 57765. */
2287         if (!tg3_flag(tp, IS_NIC) ||
2288             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2289             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2290                 return;
2291
2292         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2293              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2294              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2295              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2296             tp->pdev_peer != tp->pdev) {
2297                 struct net_device *dev_peer;
2298
2299                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2300
2301                 /* remove_one() may have been run on the peer. */
2302                 if (dev_peer) {
2303                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2304
2305                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2306                                 return;
2307
2308                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2309                             tg3_flag(tp_peer, ENABLE_ASF))
2310                                 need_vaux = true;
2311                 }
2312         }
2313
2314         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2315                 need_vaux = true;
2316
2317         if (need_vaux)
2318                 tg3_pwrsrc_switch_to_vaux(tp);
2319         else
2320                 tg3_pwrsrc_die_with_vmain(tp);
2321 }
2322
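/* 5700-class boards drive the link LED with a polarity that depends
 * on LED mode, PHY type and link speed; the return value says whether
 * MAC_MODE_LINK_POLARITY should be set for the given speed.
 */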
2323 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2324 {
2325         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2326                 return 1;
2327         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2328                 if (speed != SPEED_10)
2329                         return 1;
2330         } else if (speed == SPEED_10)
2331                 return 1;
2332
2333         return 0;
2334 }
2335
2336 static int tg3_setup_phy(struct tg3 *, int);
2337
2338 #define RESET_KIND_SHUTDOWN     0
2339 #define RESET_KIND_INIT         1
2340 #define RESET_KIND_SUSPEND      2
2341
2342 static void tg3_write_sig_post_reset(struct tg3 *, int);
2343 static int tg3_halt_cpu(struct tg3 *, u32);
2344
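/* Put the PHY into its lowest usable power state.  Serdes, 5906 and
 * FET PHYs each take a dedicated early-return path; chips with PHY
 * power-down errata (5700, 5704, and 5780 MII-serdes configurations)
 * skip the final BMCR_PDOWN write.
 */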
2345 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2346 {
2347         u32 val;
2348
2349         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2350                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2351                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2352                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2353
2354                         sg_dig_ctrl |=
2355                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2356                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2357                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2358                 }
2359                 return;
2360         }
2361
2362         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2363                 tg3_bmcr_reset(tp);
2364                 val = tr32(GRC_MISC_CFG);
2365                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2366                 udelay(40);
2367                 return;
2368         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2369                 u32 phytest;
2370                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2371                         u32 phy;
2372
2373                         tg3_writephy(tp, MII_ADVERTISE, 0);
2374                         tg3_writephy(tp, MII_BMCR,
2375                                      BMCR_ANENABLE | BMCR_ANRESTART);
2376
2377                         tg3_writephy(tp, MII_TG3_FET_TEST,
2378                                      phytest | MII_TG3_FET_SHADOW_EN);
2379                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2380                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2381                                 tg3_writephy(tp,
2382                                              MII_TG3_FET_SHDW_AUXMODE4,
2383                                              phy);
2384                         }
2385                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2386                 }
2387                 return;
2388         } else if (do_low_power) {
2389                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2390                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2391
2392                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2393                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2394                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2395                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2396         }
2397
2398         /* On some chips, hardware bugs make it unsafe to power
2399          * the PHY down at all.
2400          */
2401         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2402             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2403             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2404              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2405                 return;
2406
2407         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2408             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2409                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2410                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2411                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2412                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2413         }
2414
2415         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2416 }
2417
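/* The NVRAM software-arbitration (SWARB) register acts as a hardware
 * semaphore between the driver and other agents such as on-chip
 * firmware.  tg3_nvram_lock() claims request slot 1 and polls for
 * GNT1 for up to 8000 * 20 usec before giving up; nvram_lock_cnt
 * makes the lock recursive within the driver.
 */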
2418 /* tp->lock is held. */
2419 static int tg3_nvram_lock(struct tg3 *tp)
2420 {
2421         if (tg3_flag(tp, NVRAM)) {
2422                 int i;
2423
2424                 if (tp->nvram_lock_cnt == 0) {
2425                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2426                         for (i = 0; i < 8000; i++) {
2427                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2428                                         break;
2429                                 udelay(20);
2430                         }
2431                         if (i == 8000) {
2432                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2433                                 return -ENODEV;
2434                         }
2435                 }
2436                 tp->nvram_lock_cnt++;
2437         }
2438         return 0;
2439 }
2440
2441 /* tp->lock is held. */
2442 static void tg3_nvram_unlock(struct tg3 *tp)
2443 {
2444         if (tg3_flag(tp, NVRAM)) {
2445                 if (tp->nvram_lock_cnt > 0)
2446                         tp->nvram_lock_cnt--;
2447                 if (tp->nvram_lock_cnt == 0)
2448                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2449         }
2450 }
2451
2452 /* tp->lock is held. */
2453 static void tg3_enable_nvram_access(struct tg3 *tp)
2454 {
2455         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2456                 u32 nvaccess = tr32(NVRAM_ACCESS);
2457
2458                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2459         }
2460 }
2461
2462 /* tp->lock is held. */
2463 static void tg3_disable_nvram_access(struct tg3 *tp)
2464 {
2465         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2466                 u32 nvaccess = tr32(NVRAM_ACCESS);
2467
2468                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2469         }
2470 }
2471
2472 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2473                                         u32 offset, u32 *val)
2474 {
2475         u32 tmp;
2476         int i;
2477
2478         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2479                 return -EINVAL;
2480
2481         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2482                                         EEPROM_ADDR_DEVID_MASK |
2483                                         EEPROM_ADDR_READ);
2484         tw32(GRC_EEPROM_ADDR,
2485              tmp |
2486              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2487              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2488               EEPROM_ADDR_ADDR_MASK) |
2489              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2490
2491         for (i = 0; i < 1000; i++) {
2492                 tmp = tr32(GRC_EEPROM_ADDR);
2493
2494                 if (tmp & EEPROM_ADDR_COMPLETE)
2495                         break;
2496                 msleep(1);
2497         }
2498         if (!(tmp & EEPROM_ADDR_COMPLETE))
2499                 return -EBUSY;
2500
2501         tmp = tr32(GRC_EEPROM_DATA);
2502
2503         /*
2504          * The data will always be opposite the native endian
2505          * format.  Perform a blind byteswap to compensate.
2506          */
2507         *val = swab32(tmp);
2508
2509         return 0;
2510 }
2511
2512 #define NVRAM_CMD_TIMEOUT 10000
2513
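/* Issue an NVRAM command and poll for NVRAM_CMD_DONE at 10 usec
 * intervals, giving a worst-case wait of NVRAM_CMD_TIMEOUT * 10 usec
 * (about 100 ms) before the controller is declared stuck.
 */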
2514 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2515 {
2516         int i;
2517
2518         tw32(NVRAM_CMD, nvram_cmd);
2519         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2520                 udelay(10);
2521                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2522                         udelay(10);
2523                         break;
2524                 }
2525         }
2526
2527         if (i == NVRAM_CMD_TIMEOUT)
2528                 return -EBUSY;
2529
2530         return 0;
2531 }
2532
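/* Atmel AT45DB0x1B-style flashes are not linearly addressed: the
 * device expects a page number shifted up by
 * ATMEL_AT45DB0X1B_PAGE_POS plus a byte offset within the page, and
 * the page size (tp->nvram_pagesize) is not a power of two.  This
 * helper converts a linear NVRAM offset into that physical form;
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */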
2533 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2534 {
2535         if (tg3_flag(tp, NVRAM) &&
2536             tg3_flag(tp, NVRAM_BUFFERED) &&
2537             tg3_flag(tp, FLASH) &&
2538             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2539             (tp->nvram_jedecnum == JEDEC_ATMEL))
2540
2541                 addr = ((addr / tp->nvram_pagesize) <<
2542                         ATMEL_AT45DB0X1B_PAGE_POS) +
2543                        (addr % tp->nvram_pagesize);
2544
2545         return addr;
2546 }
2547
2548 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2549 {
2550         if (tg3_flag(tp, NVRAM) &&
2551             tg3_flag(tp, NVRAM_BUFFERED) &&
2552             tg3_flag(tp, FLASH) &&
2553             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2554             (tp->nvram_jedecnum == JEDEC_ATMEL))
2555
2556                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2557                         tp->nvram_pagesize) +
2558                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2559
2560         return addr;
2561 }
2562
2563 /* NOTE: Data read in from NVRAM is byteswapped according to
2564  * the byteswapping settings for all other register accesses.
2565  * tg3 devices are BE devices, so on a BE machine, the data
2566  * returned will be exactly as it is seen in NVRAM.  On a LE
2567  * machine, the 32-bit value will be byteswapped.
2568  */
2569 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2570 {
2571         int ret;
2572
2573         if (!tg3_flag(tp, NVRAM))
2574                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2575
2576         offset = tg3_nvram_phys_addr(tp, offset);
2577
2578         if (offset > NVRAM_ADDR_MSK)
2579                 return -EINVAL;
2580
2581         ret = tg3_nvram_lock(tp);
2582         if (ret)
2583                 return ret;
2584
2585         tg3_enable_nvram_access(tp);
2586
2587         tw32(NVRAM_ADDR, offset);
2588         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2589                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2590
2591         if (ret == 0)
2592                 *val = tr32(NVRAM_RDDATA);
2593
2594         tg3_disable_nvram_access(tp);
2595
2596         tg3_nvram_unlock(tp);
2597
2598         return ret;
2599 }
2600
2601 /* Ensures NVRAM data is in bytestream format. */
2602 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2603 {
2604         u32 v;
2605         int res = tg3_nvram_read(tp, offset, &v);
2606         if (!res)
2607                 *val = cpu_to_be32(v);
2608         return res;
2609 }
2610
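/* Program the station address into all four MAC_ADDR_{0..3} slots
 * (the 5703/5704 additionally carry twelve extended address slots),
 * then seed the transmit backoff generator from the byte sum of the
 * address, presumably so stations with different addresses pick
 * different collision-backoff sequences.
 */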
2611 /* tp->lock is held. */
2612 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2613 {
2614         u32 addr_high, addr_low;
2615         int i;
2616
2617         addr_high = ((tp->dev->dev_addr[0] << 8) |
2618                      tp->dev->dev_addr[1]);
2619         addr_low = ((tp->dev->dev_addr[2] << 24) |
2620                     (tp->dev->dev_addr[3] << 16) |
2621                     (tp->dev->dev_addr[4] <<  8) |
2622                     (tp->dev->dev_addr[5] <<  0));
2623         for (i = 0; i < 4; i++) {
2624                 if (i == 1 && skip_mac_1)
2625                         continue;
2626                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2627                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2628         }
2629
2630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2631             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2632                 for (i = 0; i < 12; i++) {
2633                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2634                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2635                 }
2636         }
2637
2638         addr_high = (tp->dev->dev_addr[0] +
2639                      tp->dev->dev_addr[1] +
2640                      tp->dev->dev_addr[2] +
2641                      tp->dev->dev_addr[3] +
2642                      tp->dev->dev_addr[4] +
2643                      tp->dev->dev_addr[5]) &
2644                 TX_BACKOFF_SEED_MASK;
2645         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2646 }
2647
2648 static void tg3_enable_register_access(struct tg3 *tp)
2649 {
2650         /*
2651          * Make sure register accesses (indirect or otherwise) will function
2652          * correctly.
2653          */
2654         pci_write_config_dword(tp->pdev,
2655                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2656 }
2657
2658 static int tg3_power_up(struct tg3 *tp)
2659 {
2660         tg3_enable_register_access(tp);
2661
2662         pci_set_power_state(tp->pdev, PCI_D0);
2663
2664         /* Switch out of Vaux if it is a NIC */
2665         tg3_pwrsrc_switch_to_vmain(tp);
2666
2667         return 0;
2668 }
2669
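/* Prepare for a low-power transition: mask PCI interrupts, save the
 * current link configuration, drop to a slow link for WOL where
 * appropriate, program the WOL mailbox and MAC/clock state, and
 * finally remove PHY and auxiliary power when nothing is armed to
 * wake the machine.
 */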
2670 static int tg3_power_down_prepare(struct tg3 *tp)
2671 {
2672         u32 misc_host_ctrl;
2673         bool device_should_wake, do_low_power;
2674
2675         tg3_enable_register_access(tp);
2676
2677         /* Restore the CLKREQ setting. */
2678         if (tg3_flag(tp, CLKREQ_BUG)) {
2679                 u16 lnkctl;
2680
2681                 pci_read_config_word(tp->pdev,
2682                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2683                                      &lnkctl);
2684                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2685                 pci_write_config_word(tp->pdev,
2686                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2687                                       lnkctl);
2688         }
2689
2690         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2691         tw32(TG3PCI_MISC_HOST_CTRL,
2692              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2693
2694         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2695                              tg3_flag(tp, WOL_ENABLE);
2696
2697         if (tg3_flag(tp, USE_PHYLIB)) {
2698                 do_low_power = false;
2699                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2700                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2701                         struct phy_device *phydev;
2702                         u32 phyid, advertising;
2703
2704                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2705
2706                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2707
2708                         tp->link_config.orig_speed = phydev->speed;
2709                         tp->link_config.orig_duplex = phydev->duplex;
2710                         tp->link_config.orig_autoneg = phydev->autoneg;
2711                         tp->link_config.orig_advertising = phydev->advertising;
2712
2713                         advertising = ADVERTISED_TP |
2714                                       ADVERTISED_Pause |
2715                                       ADVERTISED_Autoneg |
2716                                       ADVERTISED_10baseT_Half;
2717
2718                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2719                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2720                                         advertising |=
2721                                                 ADVERTISED_100baseT_Half |
2722                                                 ADVERTISED_100baseT_Full |
2723                                                 ADVERTISED_10baseT_Full;
2724                                 else
2725                                         advertising |= ADVERTISED_10baseT_Full;
2726                         }
2727
2728                         phydev->advertising = advertising;
2729
2730                         phy_start_aneg(phydev);
2731
2732                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2733                         if (phyid != PHY_ID_BCMAC131) {
2734                                 phyid &= PHY_BCM_OUI_MASK;
2735                                 if (phyid == PHY_BCM_OUI_1 ||
2736                                     phyid == PHY_BCM_OUI_2 ||
2737                                     phyid == PHY_BCM_OUI_3)
2738                                         do_low_power = true;
2739                         }
2740                 }
2741         } else {
2742                 do_low_power = true;
2743
2744                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2745                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2746                         tp->link_config.orig_speed = tp->link_config.speed;
2747                         tp->link_config.orig_duplex = tp->link_config.duplex;
2748                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2749                 }
2750
2751                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2752                         tp->link_config.speed = SPEED_10;
2753                         tp->link_config.duplex = DUPLEX_HALF;
2754                         tp->link_config.autoneg = AUTONEG_ENABLE;
2755                         tg3_setup_phy(tp, 0);
2756                 }
2757         }
2758
2759         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2760                 u32 val;
2761
2762                 val = tr32(GRC_VCPU_EXT_CTRL);
2763                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2764         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2765                 int i;
2766                 u32 val;
2767
2768                 for (i = 0; i < 200; i++) {
2769                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2770                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2771                                 break;
2772                         msleep(1);
2773                 }
2774         }
2775         if (tg3_flag(tp, WOL_CAP))
2776                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2777                                                      WOL_DRV_STATE_SHUTDOWN |
2778                                                      WOL_DRV_WOL |
2779                                                      WOL_SET_MAGIC_PKT);
2780
2781         if (device_should_wake) {
2782                 u32 mac_mode;
2783
2784                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2785                         if (do_low_power &&
2786                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2787                                 tg3_phy_auxctl_write(tp,
2788                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2789                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2790                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2791                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2792                                 udelay(40);
2793                         }
2794
2795                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2796                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2797                         else
2798                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2799
2800                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2801                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2802                             ASIC_REV_5700) {
2803                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2804                                              SPEED_100 : SPEED_10;
2805                                 if (tg3_5700_link_polarity(tp, speed))
2806                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2807                                 else
2808                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2809                         }
2810                 } else {
2811                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2812                 }
2813
2814                 if (!tg3_flag(tp, 5750_PLUS))
2815                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2816
2817                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2818                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2819                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2820                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2821
2822                 if (tg3_flag(tp, ENABLE_APE))
2823                         mac_mode |= MAC_MODE_APE_TX_EN |
2824                                     MAC_MODE_APE_RX_EN |
2825                                     MAC_MODE_TDE_ENABLE;
2826
2827                 tw32_f(MAC_MODE, mac_mode);
2828                 udelay(100);
2829
2830                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2831                 udelay(10);
2832         }
2833
2834         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2835             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2836              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2837                 u32 base_val;
2838
2839                 base_val = tp->pci_clock_ctrl;
2840                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2841                              CLOCK_CTRL_TXCLK_DISABLE);
2842
2843                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2844                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2845         } else if (tg3_flag(tp, 5780_CLASS) ||
2846                    tg3_flag(tp, CPMU_PRESENT) ||
2847                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2848                 /* do nothing */
2849         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2850                 u32 newbits1, newbits2;
2851
2852                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2853                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2854                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2855                                     CLOCK_CTRL_TXCLK_DISABLE |
2856                                     CLOCK_CTRL_ALTCLK);
2857                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2858                 } else if (tg3_flag(tp, 5705_PLUS)) {
2859                         newbits1 = CLOCK_CTRL_625_CORE;
2860                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2861                 } else {
2862                         newbits1 = CLOCK_CTRL_ALTCLK;
2863                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2864                 }
2865
2866                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2867                             40);
2868
2869                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2870                             40);
2871
2872                 if (!tg3_flag(tp, 5705_PLUS)) {
2873                         u32 newbits3;
2874
2875                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2876                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2877                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2878                                             CLOCK_CTRL_TXCLK_DISABLE |
2879                                             CLOCK_CTRL_44MHZ_CORE);
2880                         } else {
2881                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2882                         }
2883
2884                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2885                                     tp->pci_clock_ctrl | newbits3, 40);
2886                 }
2887         }
2888
2889         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2890                 tg3_power_down_phy(tp, do_low_power);
2891
2892         tg3_frob_aux_power(tp);
2893
2894         /* Workaround for unstable PLL clock */
2895         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2896             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2897                 u32 val = tr32(0x7d00);
2898
2899                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2900                 tw32(0x7d00, val);
2901                 if (!tg3_flag(tp, ENABLE_ASF)) {
2902                         int err;
2903
2904                         err = tg3_nvram_lock(tp);
2905                         tg3_halt_cpu(tp, RX_CPU_BASE);
2906                         if (!err)
2907                                 tg3_nvram_unlock(tp);
2908                 }
2909         }
2910
2911         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2912
2913         return 0;
2914 }
2915
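/* Final leg of the power-down path.  tg3_power_down_prepare() quiesces
 * the chip and arms any WoL mode; afterwards we arm PME generation only
 * if WOL_ENABLE is set, then drop the device into D3hot.
 */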
2916 static void tg3_power_down(struct tg3 *tp)
2917 {
2918         tg3_power_down_prepare(tp);
2919
2920         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2921         pci_set_power_state(tp->pdev, PCI_D3hot);
2922 }
2923
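/* Decode the speed/duplex field of the MII_TG3_AUX_STAT shadow register,
 * e.g. MII_TG3_AUX_STAT_100FULL -> SPEED_100 / DUPLEX_FULL.  FET-style
 * PHYs report the result through separate 100/FULL bits instead, which
 * the default case handles; anything unrecognized maps to
 * SPEED_INVALID/DUPLEX_INVALID.  Typical use, as in the copper link code
 * below:
 *
 *	tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
 *	tg3_aux_stat_to_speed_duplex(tp, aux_stat, &speed, &duplex);
 */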
2924 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2925 {
2926         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2927         case MII_TG3_AUX_STAT_10HALF:
2928                 *speed = SPEED_10;
2929                 *duplex = DUPLEX_HALF;
2930                 break;
2931
2932         case MII_TG3_AUX_STAT_10FULL:
2933                 *speed = SPEED_10;
2934                 *duplex = DUPLEX_FULL;
2935                 break;
2936
2937         case MII_TG3_AUX_STAT_100HALF:
2938                 *speed = SPEED_100;
2939                 *duplex = DUPLEX_HALF;
2940                 break;
2941
2942         case MII_TG3_AUX_STAT_100FULL:
2943                 *speed = SPEED_100;
2944                 *duplex = DUPLEX_FULL;
2945                 break;
2946
2947         case MII_TG3_AUX_STAT_1000HALF:
2948                 *speed = SPEED_1000;
2949                 *duplex = DUPLEX_HALF;
2950                 break;
2951
2952         case MII_TG3_AUX_STAT_1000FULL:
2953                 *speed = SPEED_1000;
2954                 *duplex = DUPLEX_FULL;
2955                 break;
2956
2957         default:
2958                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2959                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2960                                  SPEED_10;
2961                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2962                                   DUPLEX_HALF;
2963                         break;
2964                 }
2965                 *speed = SPEED_INVALID;
2966                 *duplex = DUPLEX_INVALID;
2967                 break;
2968         }
2969 }
2970
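/* Program the autoneg advertisement from ethtool-style ADVERTISED_* bits:
 * 10/100 modes plus pause flags go into MII_ADVERTISE (e.g.
 * ADVERTISED_100baseT_Full -> ADVERTISE_100FULL), gigabit modes into
 * MII_CTRL1000, and, on EEE-capable PHYs, the EEE modes are advertised
 * through the clause 45 MDIO_MMD_AN / MDIO_AN_EEE_ADV register.  5701
 * A0/B0 parts additionally force master mode as a chip workaround.
 */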
2971 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2972 {
2973         int err = 0;
2974         u32 val, new_adv;
2975
2976         new_adv = ADVERTISE_CSMA;
2977         if (advertise & ADVERTISED_10baseT_Half)
2978                 new_adv |= ADVERTISE_10HALF;
2979         if (advertise & ADVERTISED_10baseT_Full)
2980                 new_adv |= ADVERTISE_10FULL;
2981         if (advertise & ADVERTISED_100baseT_Half)
2982                 new_adv |= ADVERTISE_100HALF;
2983         if (advertise & ADVERTISED_100baseT_Full)
2984                 new_adv |= ADVERTISE_100FULL;
2985
2986         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2987
2988         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2989         if (err)
2990                 goto done;
2991
2992         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2993                 goto done;
2994
2995         new_adv = 0;
2996         if (advertise & ADVERTISED_1000baseT_Half)
2997                 new_adv |= ADVERTISE_1000HALF;
2998         if (advertise & ADVERTISED_1000baseT_Full)
2999                 new_adv |= ADVERTISE_1000FULL;
3000
3001         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3002             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3003                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3004
3005         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3006         if (err)
3007                 goto done;
3008
3009         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3010                 goto done;
3011
3012         tw32(TG3_CPMU_EEE_MODE,
3013              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3014
3015         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3016         if (!err) {
3017                 u32 err2;
3018
3019                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3020                 case ASIC_REV_5717:
3021                 case ASIC_REV_57765:
3022                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3023                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3024                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3025                         /* Fall through */
3026                 case ASIC_REV_5719:
3027                         val = MII_TG3_DSP_TAP26_ALNOKO |
3028                               MII_TG3_DSP_TAP26_RMRXSTO |
3029                               MII_TG3_DSP_TAP26_OPCSINPT;
3030                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3031                 }
3032
3033                 val = 0;
3034                 /* Advertise 100BASE-TX EEE ability */
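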
3035                 if (advertise & ADVERTISED_100baseT_Full)
3036                         val |= MDIO_AN_EEE_ADV_100TX;
3037                 /* Advertise 1000BASE-T EEE ability */
3038                 if (advertise & ADVERTISED_1000baseT_Full)
3039                         val |= MDIO_AN_EEE_ADV_1000T;
3040                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3041
3042                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3043                 if (!err)
3044                         err = err2;
3045         }
3046
3047 done:
3048         return err;
3049 }
3050
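/* Kick off link bring-up on a copper PHY.  Three cases: in low-power
 * mode advertise only the WoL-capable speeds; with autoneg and no forced
 * speed advertise the configured mode set; otherwise advertise just the
 * requested mode and, with autoneg disabled, program BMCR directly.  The
 * forced path briefly enables PHY loopback and waits for the link to
 * drop before writing the final BMCR value.
 */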
3051 static void tg3_phy_copper_begin(struct tg3 *tp)
3052 {
3053         u32 new_adv;
3054         int i;
3055
3056         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3057                 new_adv = ADVERTISED_10baseT_Half |
3058                           ADVERTISED_10baseT_Full;
3059                 if (tg3_flag(tp, WOL_SPEED_100MB))
3060                         new_adv |= ADVERTISED_100baseT_Half |
3061                                    ADVERTISED_100baseT_Full;
3062
3063                 tg3_phy_autoneg_cfg(tp, new_adv,
3064                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3065         } else if (tp->link_config.speed == SPEED_INVALID) {
3066                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3067                         tp->link_config.advertising &=
3068                                 ~(ADVERTISED_1000baseT_Half |
3069                                   ADVERTISED_1000baseT_Full);
3070
3071                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3072                                     tp->link_config.flowctrl);
3073         } else {
3074                 /* Asking for a specific link mode. */
3075                 if (tp->link_config.speed == SPEED_1000) {
3076                         if (tp->link_config.duplex == DUPLEX_FULL)
3077                                 new_adv = ADVERTISED_1000baseT_Full;
3078                         else
3079                                 new_adv = ADVERTISED_1000baseT_Half;
3080                 } else if (tp->link_config.speed == SPEED_100) {
3081                         if (tp->link_config.duplex == DUPLEX_FULL)
3082                                 new_adv = ADVERTISED_100baseT_Full;
3083                         else
3084                                 new_adv = ADVERTISED_100baseT_Half;
3085                 } else {
3086                         if (tp->link_config.duplex == DUPLEX_FULL)
3087                                 new_adv = ADVERTISED_10baseT_Full;
3088                         else
3089                                 new_adv = ADVERTISED_10baseT_Half;
3090                 }
3091
3092                 tg3_phy_autoneg_cfg(tp, new_adv,
3093                                     tp->link_config.flowctrl);
3094         }
3095
3096         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3097             tp->link_config.speed != SPEED_INVALID) {
3098                 u32 bmcr, orig_bmcr;
3099
3100                 tp->link_config.active_speed = tp->link_config.speed;
3101                 tp->link_config.active_duplex = tp->link_config.duplex;
3102
3103                 bmcr = 0;
3104                 switch (tp->link_config.speed) {
3105                 default:
3106                 case SPEED_10:
3107                         break;
3108
3109                 case SPEED_100:
3110                         bmcr |= BMCR_SPEED100;
3111                         break;
3112
3113                 case SPEED_1000:
3114                         bmcr |= BMCR_SPEED1000;
3115                         break;
3116                 }
3117
3118                 if (tp->link_config.duplex == DUPLEX_FULL)
3119                         bmcr |= BMCR_FULLDPLX;
3120
3121                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3122                     (bmcr != orig_bmcr)) {
3123                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3124                         for (i = 0; i < 1500; i++) {
3125                                 u32 tmp;
3126
3127                                 udelay(10);
3128                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3129                                     tg3_readphy(tp, MII_BMSR, &tmp))
3130                                         continue;
3131                                 if (!(tmp & BMSR_LSTATUS)) {
3132                                         udelay(40);
3133                                         break;
3134                                 }
3135                         }
3136                         tg3_writephy(tp, MII_BMCR, bmcr);
3137                         udelay(40);
3138                 }
3139         } else {
3140                 tg3_writephy(tp, MII_BMCR,
3141                              BMCR_ANENABLE | BMCR_ANRESTART);
3142         }
3143 }
3144
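/* BCM5401 DSP setup.  The register/value pairs below are opaque vendor
 * magic inherited from Broadcom; their net effect is to disable tap
 * power management and enable the extended packet length feature.
 */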
3145 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3146 {
3147         int err;
3148
3149         /* Turn off tap power management and set the
3150          * extended packet length bit. */
3151         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3152
3153         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3154         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3155         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3156         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3157         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3158
3159         udelay(40);
3160
3161         return err;
3162 }
3163
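/* Return 1 only if every mode in @mask is currently advertised, i.e. the
 * corresponding ADVERTISE_* bits are set in MII_ADVERTISE (and, for
 * gigabit-capable PHYs, in MII_CTRL1000).  Any PHY read failure counts
 * as "not advertising all".
 */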
3164 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3165 {
3166         u32 adv_reg, all_mask = 0;
3167
3168         if (mask & ADVERTISED_10baseT_Half)
3169                 all_mask |= ADVERTISE_10HALF;
3170         if (mask & ADVERTISED_10baseT_Full)
3171                 all_mask |= ADVERTISE_10FULL;
3172         if (mask & ADVERTISED_100baseT_Half)
3173                 all_mask |= ADVERTISE_100HALF;
3174         if (mask & ADVERTISED_100baseT_Full)
3175                 all_mask |= ADVERTISE_100FULL;
3176
3177         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3178                 return 0;
3179
3180         if ((adv_reg & all_mask) != all_mask)
3181                 return 0;
3182         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3183                 u32 tg3_ctrl;
3184
3185                 all_mask = 0;
3186                 if (mask & ADVERTISED_1000baseT_Half)
3187                         all_mask |= ADVERTISE_1000HALF;
3188                 if (mask & ADVERTISED_1000baseT_Full)
3189                         all_mask |= ADVERTISE_1000FULL;
3190
3191                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3192                         return 0;
3193
3194                 if ((tg3_ctrl & all_mask) != all_mask)
3195                         return 0;
3196         }
3197         return 1;
3198 }
3199
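/* Check that the advertised pause bits match what link_config.flowctrl
 * requests.  On a full-duplex link a mismatch returns 0 so the caller
 * treats the link as down and renegotiates; on half duplex the
 * advertisement is rewritten for the next negotiation but 1 is still
 * returned, since pause frames only apply to full duplex anyway.
 */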
3200 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3201 {
3202         u32 curadv, reqadv;
3203
3204         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3205                 return 1;
3206
3207         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3208         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3209
3210         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3211                 if (curadv != reqadv)
3212                         return 0;
3213
3214                 if (tg3_flag(tp, PAUSE_AUTONEG))
3215                         tg3_readphy(tp, MII_LPA, rmtadv);
3216         } else {
3217                 /* Reprogram the advertisement register, even if it
3218                  * does not affect the current link.  If the link
3219                  * gets renegotiated in the future, we can save an
3220                  * additional renegotiation cycle by advertising
3221                  * it correctly in the first place.
3222                  */
3223                 if (curadv != reqadv) {
3224                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3225                                      ADVERTISE_PAUSE_ASYM);
3226                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3227                 }
3228         }
3229
3230         return 1;
3231 }
3232
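/* Bring up or re-check the link on a copper PHY.  This is the main MII
 * link-state routine: it clears stale MAC status, applies known PHY
 * errata workarounds, polls BMSR for link, resolves speed/duplex from
 * the aux status register, and finally reprograms MAC_MODE and the
 * carrier state to match the result.
 */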
3233 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3234 {
3235         int current_link_up;
3236         u32 bmsr, val;
3237         u32 lcl_adv, rmt_adv;
3238         u16 current_speed;
3239         u8 current_duplex;
3240         int i, err;
3241
3242         tw32(MAC_EVENT, 0);
3243
3244         tw32_f(MAC_STATUS,
3245              (MAC_STATUS_SYNC_CHANGED |
3246               MAC_STATUS_CFG_CHANGED |
3247               MAC_STATUS_MI_COMPLETION |
3248               MAC_STATUS_LNKSTATE_CHANGED));
3249         udelay(40);
3250
3251         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3252                 tw32_f(MAC_MI_MODE,
3253                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3254                 udelay(80);
3255         }
3256
3257         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3258
3259         /* Some third-party PHYs need to be reset on link going
3260          * down.
3261          */
3262         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3263              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3264              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3265             netif_carrier_ok(tp->dev)) {
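                /* BMSR_LSTATUS is latched low; read BMSR twice so the
                 * second read reflects the current link state.
                 */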
3266                 tg3_readphy(tp, MII_BMSR, &bmsr);
3267                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3268                     !(bmsr & BMSR_LSTATUS))
3269                         force_reset = 1;
3270         }
3271         if (force_reset)
3272                 tg3_phy_reset(tp);
3273
3274         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3275                 tg3_readphy(tp, MII_BMSR, &bmsr);
3276                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3277                     !tg3_flag(tp, INIT_COMPLETE))
3278                         bmsr = 0;
3279
3280                 if (!(bmsr & BMSR_LSTATUS)) {
3281                         err = tg3_init_5401phy_dsp(tp);
3282                         if (err)
3283                                 return err;
3284
3285                         tg3_readphy(tp, MII_BMSR, &bmsr);
3286                         for (i = 0; i < 1000; i++) {
3287                                 udelay(10);
3288                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3289                                     (bmsr & BMSR_LSTATUS)) {
3290                                         udelay(40);
3291                                         break;
3292                                 }
3293                         }
3294
3295                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3296                             TG3_PHY_REV_BCM5401_B0 &&
3297                             !(bmsr & BMSR_LSTATUS) &&
3298                             tp->link_config.active_speed == SPEED_1000) {
3299                                 err = tg3_phy_reset(tp);
3300                                 if (!err)
3301                                         err = tg3_init_5401phy_dsp(tp);
3302                                 if (err)
3303                                         return err;
3304                         }
3305                 }
3306         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3307                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3308                 /* 5701 {A0,B0} CRC bug workaround */
3309                 tg3_writephy(tp, 0x15, 0x0a75);
3310                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3311                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3312                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3313         }
3314
3315         /* Clear pending interrupts... */
3316         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3317         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3318
3319         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3320                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3321         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3322                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3323
3324         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3325             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3326                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3327                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3328                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3329                 else
3330                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3331         }
3332
3333         current_link_up = 0;
3334         current_speed = SPEED_INVALID;
3335         current_duplex = DUPLEX_INVALID;
3336
3337         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3338                 err = tg3_phy_auxctl_read(tp,
3339                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3340                                           &val);
3341                 if (!err && !(val & (1 << 10))) {
3342                         tg3_phy_auxctl_write(tp,
3343                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3344                                              val | (1 << 10));
3345                         goto relink;
3346                 }
3347         }
3348
3349         bmsr = 0;
3350         for (i = 0; i < 100; i++) {
3351                 tg3_readphy(tp, MII_BMSR, &bmsr);
3352                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3353                     (bmsr & BMSR_LSTATUS))
3354                         break;
3355                 udelay(40);
3356         }
3357
3358         if (bmsr & BMSR_LSTATUS) {
3359                 u32 aux_stat, bmcr;
3360
3361                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3362                 for (i = 0; i < 2000; i++) {
3363                         udelay(10);
3364                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3365                             aux_stat)
3366                                 break;
3367                 }
3368
3369                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3370                                              &current_speed,
3371                                              &current_duplex);
3372
3373                 bmcr = 0;
3374                 for (i = 0; i < 200; i++) {
3375                         tg3_readphy(tp, MII_BMCR, &bmcr);
3376                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3377                                 continue;
3378                         if (bmcr && bmcr != 0x7fff)
3379                                 break;
3380                         udelay(10);
3381                 }
3382
3383                 lcl_adv = 0;
3384                 rmt_adv = 0;
3385
3386                 tp->link_config.active_speed = current_speed;
3387                 tp->link_config.active_duplex = current_duplex;
3388
3389                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3390                         if ((bmcr & BMCR_ANENABLE) &&
3391                             tg3_copper_is_advertising_all(tp,
3392                                                 tp->link_config.advertising)) {
3393                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3394                                                                   &rmt_adv))
3395                                         current_link_up = 1;
3396                         }
3397                 } else {
3398                         if (!(bmcr & BMCR_ANENABLE) &&
3399                             tp->link_config.speed == current_speed &&
3400                             tp->link_config.duplex == current_duplex &&
3401                             tp->link_config.flowctrl ==
3402                             tp->link_config.active_flowctrl) {
3403                                 current_link_up = 1;
3404                         }
3405                 }
3406
3407                 if (current_link_up == 1 &&
3408                     tp->link_config.active_duplex == DUPLEX_FULL)
3409                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3410         }
3411
3412 relink:
3413         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3414                 tg3_phy_copper_begin(tp);
3415
3416                 tg3_readphy(tp, MII_BMSR, &bmsr);
3417                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3418                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3419                         current_link_up = 1;
3420         }
3421
3422         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3423         if (current_link_up == 1) {
3424                 if (tp->link_config.active_speed == SPEED_100 ||
3425                     tp->link_config.active_speed == SPEED_10)
3426                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3427                 else
3428                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3429         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3430                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3431         else
3432                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3433
3434         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3435         if (tp->link_config.active_duplex == DUPLEX_HALF)
3436                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3437
3438         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3439                 if (current_link_up == 1 &&
3440                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3441                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3442                 else
3443                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3444         }
3445
3446         /* Without this setting the Netgear GA302T PHY does not
3447          * send or receive packets; the reason is not understood.
3448          */
3449         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3450             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3451                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3452                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3453                 udelay(80);
3454         }
3455
3456         tw32_f(MAC_MODE, tp->mac_mode);
3457         udelay(40);
3458
3459         tg3_phy_eee_adjust(tp, current_link_up);
3460
3461         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3462                 /* Link state is polled via the timer, so mask MAC events. */
3463                 tw32_f(MAC_EVENT, 0);
3464         } else {
3465                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3466         }
3467         udelay(40);
3468
3469         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3470             current_link_up == 1 &&
3471             tp->link_config.active_speed == SPEED_1000 &&
3472             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3473                 udelay(120);
3474                 tw32_f(MAC_STATUS,
3475                      (MAC_STATUS_SYNC_CHANGED |
3476                       MAC_STATUS_CFG_CHANGED));
3477                 udelay(40);
3478                 tg3_write_mem(tp,
3479                               NIC_SRAM_FIRMWARE_MBOX,
3480                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3481         }
3482
3483         /* Prevent send BD corruption by disabling CLKREQ at 10/100 speeds. */
3484         if (tg3_flag(tp, CLKREQ_BUG)) {
3485                 u16 oldlnkctl, newlnkctl;
3486
3487                 pci_read_config_word(tp->pdev,
3488                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3489                                      &oldlnkctl);
3490                 if (tp->link_config.active_speed == SPEED_100 ||
3491                     tp->link_config.active_speed == SPEED_10)
3492                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3493                 else
3494                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3495                 if (newlnkctl != oldlnkctl)
3496                         pci_write_config_word(tp->pdev,
3497                                               pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3498                                               newlnkctl);
3499         }
3500
3501         if (current_link_up != netif_carrier_ok(tp->dev)) {
3502                 if (current_link_up)
3503                         netif_carrier_on(tp->dev);
3504                 else
3505                         netif_carrier_off(tp->dev);
3506                 tg3_link_report(tp);
3507         }
3508
3509         return 0;
3510 }
3511
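/* Software state for 1000BASE-X autonegotiation (IEEE 802.3 clause 37).
 * The ANEG_STATE_* values track the arbitration state machine and the
 * MR_* flags mirror the standard's management register variables; the
 * ANEG_CFG_* bits select fields of the tx/rx config words as the MAC
 * presents them.
 */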
3512 struct tg3_fiber_aneginfo {
3513         int state;
3514 #define ANEG_STATE_UNKNOWN              0
3515 #define ANEG_STATE_AN_ENABLE            1
3516 #define ANEG_STATE_RESTART_INIT         2
3517 #define ANEG_STATE_RESTART              3
3518 #define ANEG_STATE_DISABLE_LINK_OK      4
3519 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3520 #define ANEG_STATE_ABILITY_DETECT       6
3521 #define ANEG_STATE_ACK_DETECT_INIT      7
3522 #define ANEG_STATE_ACK_DETECT           8
3523 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3524 #define ANEG_STATE_COMPLETE_ACK         10
3525 #define ANEG_STATE_IDLE_DETECT_INIT     11
3526 #define ANEG_STATE_IDLE_DETECT          12
3527 #define ANEG_STATE_LINK_OK              13
3528 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3529 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3530
3531         u32 flags;
3532 #define MR_AN_ENABLE            0x00000001
3533 #define MR_RESTART_AN           0x00000002
3534 #define MR_AN_COMPLETE          0x00000004
3535 #define MR_PAGE_RX              0x00000008
3536 #define MR_NP_LOADED            0x00000010
3537 #define MR_TOGGLE_TX            0x00000020
3538 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3539 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3540 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3541 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3542 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3543 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3544 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3545 #define MR_TOGGLE_RX            0x00002000
3546 #define MR_NP_RX                0x00004000
3547
3548 #define MR_LINK_OK              0x80000000
3549
3550         unsigned long link_time, cur_time;
3551
3552         u32 ability_match_cfg;
3553         int ability_match_count;
3554
3555         char ability_match, idle_match, ack_match;
3556
3557         u32 txconfig, rxconfig;
3558 #define ANEG_CFG_NP             0x00000080
3559 #define ANEG_CFG_ACK            0x00000040
3560 #define ANEG_CFG_RF2            0x00000020
3561 #define ANEG_CFG_RF1            0x00000010
3562 #define ANEG_CFG_PS2            0x00000001
3563 #define ANEG_CFG_PS1            0x00008000
3564 #define ANEG_CFG_HD             0x00004000
3565 #define ANEG_CFG_FD             0x00002000
3566 #define ANEG_CFG_INVAL          0x00001f06
3567
3568 };
3569 #define ANEG_OK         0
3570 #define ANEG_DONE       1
3571 #define ANEG_TIMER_ENAB 2
3572 #define ANEG_FAILED     -1
3573
3574 #define ANEG_STATE_SETTLE_TIME  10000
3575
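/* One tick of the software clause 37 state machine.  The caller invokes
 * this repeatedly: ANEG_TIMER_ENAB means "keep ticking", ANEG_DONE means
 * negotiation finished, ANEG_FAILED aborts.  Received config words are
 * sampled from MAC_RX_AUTO_NEG and transmitted ones are driven through
 * MAC_TX_AUTO_NEG with MAC_MODE_SEND_CONFIGS set.
 */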
3576 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3577                                    struct tg3_fiber_aneginfo *ap)
3578 {
3579         u16 flowctrl;
3580         unsigned long delta;
3581         u32 rx_cfg_reg;
3582         int ret;
3583
3584         if (ap->state == ANEG_STATE_UNKNOWN) {
3585                 ap->rxconfig = 0;
3586                 ap->link_time = 0;
3587                 ap->cur_time = 0;
3588                 ap->ability_match_cfg = 0;
3589                 ap->ability_match_count = 0;
3590                 ap->ability_match = 0;
3591                 ap->idle_match = 0;
3592                 ap->ack_match = 0;
3593         }
3594         ap->cur_time++;
3595
3596         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3597                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3598
3599                 if (rx_cfg_reg != ap->ability_match_cfg) {
3600                         ap->ability_match_cfg = rx_cfg_reg;
3601                         ap->ability_match = 0;
3602                         ap->ability_match_count = 0;
3603                 } else {
3604                         if (++ap->ability_match_count > 1) {
3605                                 ap->ability_match = 1;
3606                                 ap->ability_match_cfg = rx_cfg_reg;
3607                         }
3608                 }
3609                 if (rx_cfg_reg & ANEG_CFG_ACK)
3610                         ap->ack_match = 1;
3611                 else
3612                         ap->ack_match = 0;
3613
3614                 ap->idle_match = 0;
3615         } else {
3616                 ap->idle_match = 1;
3617                 ap->ability_match_cfg = 0;
3618                 ap->ability_match_count = 0;
3619                 ap->ability_match = 0;
3620                 ap->ack_match = 0;
3621
3622                 rx_cfg_reg = 0;
3623         }
3624
3625         ap->rxconfig = rx_cfg_reg;
3626         ret = ANEG_OK;
3627
3628         switch (ap->state) {
3629         case ANEG_STATE_UNKNOWN:
3630                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3631                         ap->state = ANEG_STATE_AN_ENABLE;
3632
3633                 /* fall through */
3634         case ANEG_STATE_AN_ENABLE:
3635                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3636                 if (ap->flags & MR_AN_ENABLE) {
3637                         ap->link_time = 0;
3638                         ap->cur_time = 0;
3639                         ap->ability_match_cfg = 0;
3640                         ap->ability_match_count = 0;
3641                         ap->ability_match = 0;
3642                         ap->idle_match = 0;
3643                         ap->ack_match = 0;
3644
3645                         ap->state = ANEG_STATE_RESTART_INIT;
3646                 } else {
3647                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3648                 }
3649                 break;
3650
3651         case ANEG_STATE_RESTART_INIT:
3652                 ap->link_time = ap->cur_time;
3653                 ap->flags &= ~(MR_NP_LOADED);
3654                 ap->txconfig = 0;
3655                 tw32(MAC_TX_AUTO_NEG, 0);
3656                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3657                 tw32_f(MAC_MODE, tp->mac_mode);
3658                 udelay(40);
3659
3660                 ret = ANEG_TIMER_ENAB;
3661                 ap->state = ANEG_STATE_RESTART;
3662
3663                 /* fall through */
3664         case ANEG_STATE_RESTART:
3665                 delta = ap->cur_time - ap->link_time;
3666                 if (delta > ANEG_STATE_SETTLE_TIME)
3667                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3668                 else
3669                         ret = ANEG_TIMER_ENAB;
3670                 break;
3671
3672         case ANEG_STATE_DISABLE_LINK_OK:
3673                 ret = ANEG_DONE;
3674                 break;
3675
3676         case ANEG_STATE_ABILITY_DETECT_INIT:
3677                 ap->flags &= ~(MR_TOGGLE_TX);
3678                 ap->txconfig = ANEG_CFG_FD;
3679                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3680                 if (flowctrl & ADVERTISE_1000XPAUSE)
3681                         ap->txconfig |= ANEG_CFG_PS1;
3682                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3683                         ap->txconfig |= ANEG_CFG_PS2;
3684                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3685                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3686                 tw32_f(MAC_MODE, tp->mac_mode);
3687                 udelay(40);
3688
3689                 ap->state = ANEG_STATE_ABILITY_DETECT;
3690                 break;
3691
3692         case ANEG_STATE_ABILITY_DETECT:
3693                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3694                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3695                 break;
3696
3697         case ANEG_STATE_ACK_DETECT_INIT:
3698                 ap->txconfig |= ANEG_CFG_ACK;
3699                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3700                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3701                 tw32_f(MAC_MODE, tp->mac_mode);
3702                 udelay(40);
3703
3704                 ap->state = ANEG_STATE_ACK_DETECT;
3705
3706                 /* fall through */
3707         case ANEG_STATE_ACK_DETECT:
3708                 if (ap->ack_match != 0) {
3709                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3710                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3711                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3712                         } else {
3713                                 ap->state = ANEG_STATE_AN_ENABLE;
3714                         }
3715                 } else if (ap->ability_match != 0 &&
3716                            ap->rxconfig == 0) {
3717                         ap->state = ANEG_STATE_AN_ENABLE;
3718                 }
3719                 break;
3720
3721         case ANEG_STATE_COMPLETE_ACK_INIT:
3722                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3723                         ret = ANEG_FAILED;
3724                         break;
3725                 }
3726                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3727                                MR_LP_ADV_HALF_DUPLEX |
3728                                MR_LP_ADV_SYM_PAUSE |
3729                                MR_LP_ADV_ASYM_PAUSE |
3730                                MR_LP_ADV_REMOTE_FAULT1 |
3731                                MR_LP_ADV_REMOTE_FAULT2 |
3732                                MR_LP_ADV_NEXT_PAGE |
3733                                MR_TOGGLE_RX |
3734                                MR_NP_RX);
3735                 if (ap->rxconfig & ANEG_CFG_FD)
3736                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3737                 if (ap->rxconfig & ANEG_CFG_HD)
3738                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3739                 if (ap->rxconfig & ANEG_CFG_PS1)
3740                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3741                 if (ap->rxconfig & ANEG_CFG_PS2)
3742                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3743                 if (ap->rxconfig & ANEG_CFG_RF1)
3744                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3745                 if (ap->rxconfig & ANEG_CFG_RF2)
3746                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3747                 if (ap->rxconfig & ANEG_CFG_NP)
3748                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3749
3750                 ap->link_time = ap->cur_time;
3751
3752                 ap->flags ^= (MR_TOGGLE_TX);
3753                 if (ap->rxconfig & 0x0008)
3754                         ap->flags |= MR_TOGGLE_RX;
3755                 if (ap->rxconfig & ANEG_CFG_NP)
3756                         ap->flags |= MR_NP_RX;
3757                 ap->flags |= MR_PAGE_RX;
3758
3759                 ap->state = ANEG_STATE_COMPLETE_ACK;
3760                 ret = ANEG_TIMER_ENAB;
3761                 break;
3762
3763         case ANEG_STATE_COMPLETE_ACK:
3764                 if (ap->ability_match != 0 &&
3765                     ap->rxconfig == 0) {
3766                         ap->state = ANEG_STATE_AN_ENABLE;
3767                         break;
3768                 }
3769                 delta = ap->cur_time - ap->link_time;
3770                 if (delta > ANEG_STATE_SETTLE_TIME) {
3771                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3772                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3773                         } else {
3774                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3775                                     !(ap->flags & MR_NP_RX)) {
3776                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3777                                 } else {
3778                                         ret = ANEG_FAILED;
3779                                 }
3780                         }
3781                 }
3782                 break;
3783
3784         case ANEG_STATE_IDLE_DETECT_INIT:
3785                 ap->link_time = ap->cur_time;
3786                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3787                 tw32_f(MAC_MODE, tp->mac_mode);
3788                 udelay(40);
3789
3790                 ap->state = ANEG_STATE_IDLE_DETECT;
3791                 ret = ANEG_TIMER_ENAB;
3792                 break;
3793
3794         case ANEG_STATE_IDLE_DETECT:
3795                 if (ap->ability_match != 0 &&
3796                     ap->rxconfig == 0) {
3797                         ap->state = ANEG_STATE_AN_ENABLE;
3798                         break;
3799                 }
3800                 delta = ap->cur_time - ap->link_time;
3801                 if (delta > ANEG_STATE_SETTLE_TIME) {
3802                         /* XXX another gem from the Broadcom driver :( */
3803                         ap->state = ANEG_STATE_LINK_OK;
3804                 }
3805                 break;
3806
3807         case ANEG_STATE_LINK_OK:
3808                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3809                 ret = ANEG_DONE;
3810                 break;
3811
3812         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3813                 /* ??? unimplemented */
3814                 break;
3815
3816         case ANEG_STATE_NEXT_PAGE_WAIT:
3817                 /* ??? unimplemented */
3818                 break;
3819
3820         default:
3821                 ret = ANEG_FAILED;
3822                 break;
3823         }
3824
3825         return ret;
3826 }
3827
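/* Run the software autoneg state machine to completion, ticking it once
 * per microsecond for at most 195000 iterations (~195 ms), then report
 * the negotiated config words through @txflags/@rxflags.  Callers treat
 * a nonzero return as "negotiation finished and usable", e.g.:
 *
 *	if (fiber_autoneg(tp, &txflags, &rxflags))
 *		... resolve flow control from txflags/rxflags ...
 */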
3828 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3829 {
3830         int res = 0;
3831         struct tg3_fiber_aneginfo aninfo;
3832         int status = ANEG_FAILED;
3833         unsigned int tick;
3834         u32 tmp;
3835
3836         tw32_f(MAC_TX_AUTO_NEG, 0);
3837
3838         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3839         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3840         udelay(40);
3841
3842         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3843         udelay(40);
3844
3845         memset(&aninfo, 0, sizeof(aninfo));
3846         aninfo.flags |= MR_AN_ENABLE;
3847         aninfo.state = ANEG_STATE_UNKNOWN;
3848         aninfo.cur_time = 0;
3849         tick = 0;
3850         while (++tick < 195000) {
3851                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3852                 if (status == ANEG_DONE || status == ANEG_FAILED)
3853                         break;
3854
3855                 udelay(1);
3856         }
3857
3858         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3859         tw32_f(MAC_MODE, tp->mac_mode);
3860         udelay(40);
3861
3862         *txflags = aninfo.txconfig;
3863         *rxflags = aninfo.flags;
3864
3865         if (status == ANEG_DONE &&
3866             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3867                              MR_LP_ADV_FULL_DUPLEX)))
3868                 res = 1;
3869
3870         return res;
3871 }
3872
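/* Initialize the BCM8002 SerDes PHY.  The writes below are undocumented
 * vendor magic: set the PLL lock range, soft-reset, select the PMA/Ch 1
 * register bank, toggle POR, and busy-wait (5 ms / 150 ms) for the reset
 * and signal to settle.
 */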
3873 static void tg3_init_bcm8002(struct tg3 *tp)
3874 {
3875         u32 mac_status = tr32(MAC_STATUS);
3876         int i;
3877
3878         /* Reset on first-time init, or when we have a link. */
3879         if (tg3_flag(tp, INIT_COMPLETE) &&
3880             !(mac_status & MAC_STATUS_PCS_SYNCED))
3881                 return;
3882
3883         /* Set PLL lock range. */
3884         tg3_writephy(tp, 0x16, 0x8007);
3885
3886         /* SW reset */
3887         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3888
3889         /* Wait for reset to complete. */
3890         /* XXX schedule_timeout() ... */
3891         for (i = 0; i < 500; i++)
3892                 udelay(10);
3893
3894         /* Config mode; select PMA/Ch 1 regs. */
3895         tg3_writephy(tp, 0x10, 0x8411);
3896
3897         /* Enable auto-lock and comdet, select txclk for tx. */
3898         tg3_writephy(tp, 0x11, 0x0a10);
3899
3900         tg3_writephy(tp, 0x18, 0x00a0);
3901         tg3_writephy(tp, 0x16, 0x41ff);
3902
3903         /* Assert and deassert POR. */
3904         tg3_writephy(tp, 0x13, 0x0400);
3905         udelay(40);
3906         tg3_writephy(tp, 0x13, 0x0000);
3907
3908         tg3_writephy(tp, 0x11, 0x0a50);
3909         udelay(40);
3910         tg3_writephy(tp, 0x11, 0x0a10);
3911
3912         /* Wait for signal to stabilize */
3913         /* XXX schedule_timeout() ... */
3914         for (i = 0; i < 15000; i++)
3915                 udelay(10);
3916
3917         /* Deselect the channel register so we can read the PHYID
3918          * later.
3919          */
3920         tg3_writephy(tp, 0x10, 0x8011);
3921 }
3922
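/* Fiber link setup using the hardware SG_DIG autoneg block.  Forced mode
 * just checks PCS sync; autoneg programs the expected SG_DIG_CTRL value
 * (soft-resetting the block when it differs) and, if the partner never
 * completes autoneg, falls back to parallel detection.  The
 * MAC_SERDES_CFG writes are a workaround needed on everything except
 * 5704 A0/A1.  Returns nonzero when the link is up.
 */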
3923 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3924 {
3925         u16 flowctrl;
3926         u32 sg_dig_ctrl, sg_dig_status;
3927         u32 serdes_cfg, expected_sg_dig_ctrl;
3928         int workaround, port_a;
3929         int current_link_up;
3930
3931         serdes_cfg = 0;
3932         expected_sg_dig_ctrl = 0;
3933         workaround = 0;
3934         port_a = 1;
3935         current_link_up = 0;
3936
3937         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3938             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3939                 workaround = 1;
3940                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3941                         port_a = 0;
3942
3943                 /* Preserve bits 0-11,13,14 (signal pre-emphasis)
3944                  * and bits 20-23 (voltage regulator). */
3945                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3946         }
3947
3948         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3949
3950         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3951                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3952                         if (workaround) {
3953                                 u32 val = serdes_cfg;
3954
3955                                 if (port_a)
3956                                         val |= 0xc010000;
3957                                 else
3958                                         val |= 0x4010000;
3959                                 tw32_f(MAC_SERDES_CFG, val);
3960                         }
3961
3962                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3963                 }
3964                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3965                         tg3_setup_flow_control(tp, 0, 0);
3966                         current_link_up = 1;
3967                 }
3968                 goto out;
3969         }
3970
3971         /* Want auto-negotiation. */
3972         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3973
3974         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3975         if (flowctrl & ADVERTISE_1000XPAUSE)
3976                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3977         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3978                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3979
3980         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3981                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3982                     tp->serdes_counter &&
3983                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3984                                     MAC_STATUS_RCVD_CFG)) ==
3985                      MAC_STATUS_PCS_SYNCED)) {
3986                         tp->serdes_counter--;
3987                         current_link_up = 1;
3988                         goto out;
3989                 }
3990 restart_autoneg:
3991                 if (workaround)
3992                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3993                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3994                 udelay(5);
3995                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3996
3997                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3998                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3999         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4000                                  MAC_STATUS_SIGNAL_DET)) {
4001                 sg_dig_status = tr32(SG_DIG_STATUS);
4002                 mac_status = tr32(MAC_STATUS);
4003
4004                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4005                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4006                         u32 local_adv = 0, remote_adv = 0;
4007
4008                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4009                                 local_adv |= ADVERTISE_1000XPAUSE;
4010                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4011                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4012
4013                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4014                                 remote_adv |= LPA_1000XPAUSE;
4015                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4016                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4017
4018                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4019                         current_link_up = 1;
4020                         tp->serdes_counter = 0;
4021                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4022                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4023                         if (tp->serdes_counter)
4024                                 tp->serdes_counter--;
4025                         else {
4026                                 if (workaround) {
4027                                         u32 val = serdes_cfg;
4028
4029                                         if (port_a)
4030                                                 val |= 0xc010000;
4031                                         else
4032                                                 val |= 0x4010000;
4033
4034                                         tw32_f(MAC_SERDES_CFG, val);
4035                                 }
4036
4037                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4038                                 udelay(40);
4039
4040                                 /* Parallel detection: the link is up only
4041                                  * if we have PCS_SYNC and are not
4042                                  * receiving config code words. */
4043                                 mac_status = tr32(MAC_STATUS);
4044                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4045                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4046                                         tg3_setup_flow_control(tp, 0, 0);
4047                                         current_link_up = 1;
4048                                         tp->phy_flags |=
4049                                                 TG3_PHYFLG_PARALLEL_DETECT;
4050                                         tp->serdes_counter =
4051                                                 SERDES_PARALLEL_DET_TIMEOUT;
4052                                 } else
4053                                         goto restart_autoneg;
4054                         }
4055                 }
4056         } else {
4057                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4058                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4059         }
4060
4061 out:
4062         return current_link_up;
4063 }
4064
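/* Fiber link setup without the SG_DIG block: run the software clause 37
 * state machine when autoneg is enabled, otherwise force a 1000FD link.
 * Returns nonzero when the link is up.
 */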
4065 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4066 {
4067         int current_link_up = 0;
4068
4069         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4070                 goto out;
4071
4072         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4073                 u32 txflags, rxflags;
4074                 int i;
4075
4076                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4077                         u32 local_adv = 0, remote_adv = 0;
4078
4079                         if (txflags & ANEG_CFG_PS1)
4080                                 local_adv |= ADVERTISE_1000XPAUSE;
4081                         if (txflags & ANEG_CFG_PS2)
4082                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4083
4084                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4085                                 remote_adv |= LPA_1000XPAUSE;
4086                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4087                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4088
4089                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4090
4091                         current_link_up = 1;
4092                 }
4093                 for (i = 0; i < 30; i++) {
4094                         udelay(20);
4095                         tw32_f(MAC_STATUS,
4096                                (MAC_STATUS_SYNC_CHANGED |
4097                                 MAC_STATUS_CFG_CHANGED));
4098                         udelay(40);
4099                         if ((tr32(MAC_STATUS) &
4100                              (MAC_STATUS_SYNC_CHANGED |
4101                               MAC_STATUS_CFG_CHANGED)) == 0)
4102                                 break;
4103                 }
4104
4105                 mac_status = tr32(MAC_STATUS);
4106                 if (current_link_up == 0 &&
4107                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4108                     !(mac_status & MAC_STATUS_RCVD_CFG))
4109                         current_link_up = 1;
4110         } else {
4111                 tg3_setup_flow_control(tp, 0, 0);
4112
4113                 /* Forcing 1000FD link up. */
4114                 current_link_up = 1;
4115
4116                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4117                 udelay(40);
4118
4119                 tw32_f(MAC_MODE, tp->mac_mode);
4120                 udelay(40);
4121         }
4122
4123 out:
4124         return current_link_up;
4125 }
4126
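/* Top-level link routine for TBI (internal SerDes) ports.  A fast path
 * returns early when the link was already up and nothing changed;
 * otherwise the port is put into TBI mode and either hardware or
 * hand-rolled autoneg runs, after which the LEDs and carrier state are
 * brought in line with the result.
 */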
4127 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4128 {
4129         u32 orig_pause_cfg;
4130         u16 orig_active_speed;
4131         u8 orig_active_duplex;
4132         u32 mac_status;
4133         int current_link_up;
4134         int i;
4135
4136         orig_pause_cfg = tp->link_config.active_flowctrl;
4137         orig_active_speed = tp->link_config.active_speed;
4138         orig_active_duplex = tp->link_config.active_duplex;
4139
4140         if (!tg3_flag(tp, HW_AUTONEG) &&
4141             netif_carrier_ok(tp->dev) &&
4142             tg3_flag(tp, INIT_COMPLETE)) {
4143                 mac_status = tr32(MAC_STATUS);
4144                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4145                                MAC_STATUS_SIGNAL_DET |
4146                                MAC_STATUS_CFG_CHANGED |
4147                                MAC_STATUS_RCVD_CFG);
4148                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4149                                    MAC_STATUS_SIGNAL_DET)) {
4150                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4151                                             MAC_STATUS_CFG_CHANGED));
4152                         return 0;
4153                 }
4154         }
4155
4156         tw32_f(MAC_TX_AUTO_NEG, 0);
4157
4158         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4159         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4160         tw32_f(MAC_MODE, tp->mac_mode);
4161         udelay(40);
4162
4163         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4164                 tg3_init_bcm8002(tp);
4165
4166         /* Enable link change events even while polling the serdes.  */
4167         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4168         udelay(40);
4169
4170         current_link_up = 0;
4171         mac_status = tr32(MAC_STATUS);
4172
4173         if (tg3_flag(tp, HW_AUTONEG))
4174                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4175         else
4176                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4177
4178         tp->napi[0].hw_status->status =
4179                 (SD_STATUS_UPDATED |
4180                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4181
4182         for (i = 0; i < 100; i++) {
4183                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4184                                     MAC_STATUS_CFG_CHANGED));
4185                 udelay(5);
4186                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4187                                          MAC_STATUS_CFG_CHANGED |
4188                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4189                         break;
4190         }
4191
4192         mac_status = tr32(MAC_STATUS);
4193         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4194                 current_link_up = 0;
4195                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4196                     tp->serdes_counter == 0) {
4197                         tw32_f(MAC_MODE, (tp->mac_mode |
4198                                           MAC_MODE_SEND_CONFIGS));
4199                         udelay(1);
4200                         tw32_f(MAC_MODE, tp->mac_mode);
4201                 }
4202         }
4203
4204         if (current_link_up == 1) {
4205                 tp->link_config.active_speed = SPEED_1000;
4206                 tp->link_config.active_duplex = DUPLEX_FULL;
4207                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4208                                     LED_CTRL_LNKLED_OVERRIDE |
4209                                     LED_CTRL_1000MBPS_ON));
4210         } else {
4211                 tp->link_config.active_speed = SPEED_INVALID;
4212                 tp->link_config.active_duplex = DUPLEX_INVALID;
4213                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4214                                     LED_CTRL_LNKLED_OVERRIDE |
4215                                     LED_CTRL_TRAFFIC_OVERRIDE));
4216         }
4217
4218         if (current_link_up != netif_carrier_ok(tp->dev)) {
4219                 if (current_link_up)
4220                         netif_carrier_on(tp->dev);
4221                 else
4222                         netif_carrier_off(tp->dev);
4223                 tg3_link_report(tp);
4224         } else {
4225                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4226                 if (orig_pause_cfg != now_pause_cfg ||
4227                     orig_active_speed != tp->link_config.active_speed ||
4228                     orig_active_duplex != tp->link_config.active_duplex)
4229                         tg3_link_report(tp);
4230         }
4231
4232         return 0;
4233 }
4234
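/* Link setup for the MII-attached serdes PHYs (e.g. on 5714S parts):
 * program the MAC for GMII, run or force autonegotiation over the MII
 * registers, then derive speed, duplex and flow control from the result.
 */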
4235 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4236 {
4237         int current_link_up, err = 0;
4238         u32 bmsr, bmcr;
4239         u16 current_speed;
4240         u8 current_duplex;
4241         u32 local_adv, remote_adv;
4242
4243         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4244         tw32_f(MAC_MODE, tp->mac_mode);
4245         udelay(40);
4246
4247         tw32(MAC_EVENT, 0);
4248
4249         tw32_f(MAC_STATUS,
4250              (MAC_STATUS_SYNC_CHANGED |
4251               MAC_STATUS_CFG_CHANGED |
4252               MAC_STATUS_MI_COMPLETION |
4253               MAC_STATUS_LNKSTATE_CHANGED));
4254         udelay(40);
4255
4256         if (force_reset)
4257                 tg3_phy_reset(tp);
4258
4259         current_link_up = 0;
4260         current_speed = SPEED_INVALID;
4261         current_duplex = DUPLEX_INVALID;
4262
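        /* The BMSR link status bit is latched-low, so read it twice to
         * get the current link state.
         */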
4263         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4264         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4265         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4266                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4267                         bmsr |= BMSR_LSTATUS;
4268                 else
4269                         bmsr &= ~BMSR_LSTATUS;
4270         }
4271
4272         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4273
4274         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4275             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4276                 /* do nothing, just check for link up at the end */
4277         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4278                 u32 adv, new_adv;
4279
4280                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4281                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4282                                   ADVERTISE_1000XPAUSE |
4283                                   ADVERTISE_1000XPSE_ASYM |
4284                                   ADVERTISE_SLCT);
4285
4286                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4287
4288                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4289                         new_adv |= ADVERTISE_1000XHALF;
4290                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4291                         new_adv |= ADVERTISE_1000XFULL;
4292
4293                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4294                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4295                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4296                         tg3_writephy(tp, MII_BMCR, bmcr);
4297
4298                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4299                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4300                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4301
4302                         return err;
4303                 }
4304         } else {
4305                 u32 new_bmcr;
4306
4307                 bmcr &= ~BMCR_SPEED1000;
4308                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4309
4310                 if (tp->link_config.duplex == DUPLEX_FULL)
4311                         new_bmcr |= BMCR_FULLDPLX;
4312
4313                 if (new_bmcr != bmcr) {
4314                         /* BMCR_SPEED1000 is a reserved bit that needs
4315                          * to be set on write.
4316                          */
4317                         new_bmcr |= BMCR_SPEED1000;
4318
4319                         /* Force a linkdown */
4320                         if (netif_carrier_ok(tp->dev)) {
4321                                 u32 adv;
4322
4323                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4324                                 adv &= ~(ADVERTISE_1000XFULL |
4325                                          ADVERTISE_1000XHALF |
4326                                          ADVERTISE_SLCT);
4327                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4328                                 tg3_writephy(tp, MII_BMCR, bmcr |
4329                                                            BMCR_ANRESTART |
4330                                                            BMCR_ANENABLE);
4331                                 udelay(10);
4332                                 netif_carrier_off(tp->dev);
4333                         }
4334                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4335                         bmcr = new_bmcr;
4336                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4337                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4338                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4339                             ASIC_REV_5714) {
4340                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4341                                         bmsr |= BMSR_LSTATUS;
4342                                 else
4343                                         bmsr &= ~BMSR_LSTATUS;
4344                         }
4345                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4346                 }
4347         }
4348
4349         if (bmsr & BMSR_LSTATUS) {
4350                 current_speed = SPEED_1000;
4351                 current_link_up = 1;
4352                 if (bmcr & BMCR_FULLDPLX)
4353                         current_duplex = DUPLEX_FULL;
4354                 else
4355                         current_duplex = DUPLEX_HALF;
4356
4357                 local_adv = 0;
4358                 remote_adv = 0;
4359
4360                 if (bmcr & BMCR_ANENABLE) {
4361                         u32 common;
4362
4363                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4364                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4365                         common = local_adv & remote_adv;
4366                         if (common & (ADVERTISE_1000XHALF |
4367                                       ADVERTISE_1000XFULL)) {
4368                                 if (common & ADVERTISE_1000XFULL)
4369                                         current_duplex = DUPLEX_FULL;
4370                                 else
4371                                         current_duplex = DUPLEX_HALF;
4372                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4373                                 /* Link is up via parallel detect */
4374                         } else {
4375                                 current_link_up = 0;
4376                         }
4377                 }
4378         }
4379
4380         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4381                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4382
4383         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4384         if (tp->link_config.active_duplex == DUPLEX_HALF)
4385                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4386
4387         tw32_f(MAC_MODE, tp->mac_mode);
4388         udelay(40);
4389
4390         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4391
4392         tp->link_config.active_speed = current_speed;
4393         tp->link_config.active_duplex = current_duplex;
4394
4395         if (current_link_up != netif_carrier_ok(tp->dev)) {
4396                 if (current_link_up)
4397                         netif_carrier_on(tp->dev);
4398                 else {
4399                         netif_carrier_off(tp->dev);
4400                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4401                 }
4402                 tg3_link_report(tp);
4403         }
4404         return err;
4405 }
4406
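/* Fall back to parallel detection when serdes autonegotiation stalls.
 * Once the autoneg grace period (serdes_counter) expires: if signal
 * detect is present but no config code words are being received, force
 * the link up; if config code words reappear later, hand the link back
 * to autonegotiation.
 */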
4407 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4408 {
4409         if (tp->serdes_counter) {
4410                 /* Give autoneg time to complete. */
4411                 tp->serdes_counter--;
4412                 return;
4413         }
4414
4415         if (!netif_carrier_ok(tp->dev) &&
4416             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4417                 u32 bmcr;
4418
4419                 tg3_readphy(tp, MII_BMCR, &bmcr);
4420                 if (bmcr & BMCR_ANENABLE) {
4421                         u32 phy1, phy2;
4422
4423                         /* Select shadow register 0x1f */
4424                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4425                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4426
4427                         /* Select expansion interrupt status register */
4428                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4429                                          MII_TG3_DSP_EXP1_INT_STAT);
4430                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4431                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4432
4433                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4434                                 /* We have signal detect and not receiving
4435                                 /* We have signal detect and are not receiving
4436                                  * config code words; link is up by parallel
4437                                  */
4438
4439                                 bmcr &= ~BMCR_ANENABLE;
4440                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4441                                 tg3_writephy(tp, MII_BMCR, bmcr);
4442                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4443                         }
4444                 }
4445         } else if (netif_carrier_ok(tp->dev) &&
4446                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4447                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4448                 u32 phy2;
4449
4450                 /* Select expansion interrupt status register */
4451                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4452                                  MII_TG3_DSP_EXP1_INT_STAT);
4453                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4454                 if (phy2 & 0x20) {
4455                         u32 bmcr;
4456
4457                         /* Config code words received, turn on autoneg. */
4458                         tg3_readphy(tp, MII_BMCR, &bmcr);
4459                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4460
4461                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4462
4463                 }
4464         }
4465 }
4466
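/* Top-level link (re)configuration: dispatch to the serdes, MII-serdes
 * or copper handler, then reprogram everything that depends on the new
 * link state (5784-AX MAC clock prescaler, TX slot time, statistics
 * coalescing and the ASPM workaround threshold).
 */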
4467 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4468 {
4469         u32 val;
4470         int err;
4471
4472         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4473                 err = tg3_setup_fiber_phy(tp, force_reset);
4474         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4475                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4476         else
4477                 err = tg3_setup_copper_phy(tp, force_reset);
4478
4479         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4480                 u32 scale;
4481
4482                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4483                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4484                         scale = 65;
4485                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4486                         scale = 6;
4487                 else
4488                         scale = 12;
4489
4490                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4491                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4492                 tw32(GRC_MISC_CFG, val);
4493         }
4494
4495         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4496               (6 << TX_LENGTHS_IPG_SHIFT);
4497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4498                 val |= tr32(MAC_TX_LENGTHS) &
4499                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4500                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4501
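        /* Half-duplex gigabit needs an extended slot time (presumably
         * for carrier extension); everything else uses the default of 32.
         */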
4502         if (tp->link_config.active_speed == SPEED_1000 &&
4503             tp->link_config.active_duplex == DUPLEX_HALF)
4504                 tw32(MAC_TX_LENGTHS, val |
4505                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4506         else
4507                 tw32(MAC_TX_LENGTHS, val |
4508                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4509
4510         if (!tg3_flag(tp, 5705_PLUS)) {
4511                 if (netif_carrier_ok(tp->dev)) {
4512                         tw32(HOSTCC_STAT_COAL_TICKS,
4513                              tp->coal.stats_block_coalesce_usecs);
4514                 } else {
4515                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4516                 }
4517         }
4518
4519         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4520                 val = tr32(PCIE_PWR_MGMT_THRESH);
4521                 if (!netif_carrier_ok(tp->dev))
4522                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4523                               tp->pwrmgmt_thresh;
4524                 else
4525                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4526                 tw32(PCIE_PWR_MGMT_THRESH, val);
4527         }
4528
4529         return err;
4530 }
4531
4532 static inline int tg3_irq_sync(struct tg3 *tp)
4533 {
4534         return tp->irq_sync;
4535 }
4536
4537 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4538 {
4539         int i;
4540
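        /* The dump buffer mirrors the register address space: advance
         * dst by the starting register offset so each register lands at
         * its own offset within the buffer.
         */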
4541         dst = (u32 *)((u8 *)dst + off);
4542         for (i = 0; i < len; i += sizeof(u32))
4543                 *dst++ = tr32(off + i);
4544 }
4545
4546 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4547 {
4548         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4549         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4550         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4551         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4552         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4553         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4554         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4555         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4556         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4557         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4558         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4559         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4560         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4561         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4562         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4563         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4564         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4565         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4566         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4567
4568         if (tg3_flag(tp, SUPPORT_MSIX))
4569                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4570
4571         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4572         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4573         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4574         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4575         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4576         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4577         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4578         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4579
4580         if (!tg3_flag(tp, 5705_PLUS)) {
4581                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4582                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4583                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4584         }
4585
4586         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4587         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4588         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4589         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4590         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4591
4592         if (tg3_flag(tp, NVRAM))
4593                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4594 }
4595
4596 static void tg3_dump_state(struct tg3 *tp)
4597 {
4598         int i;
4599         u32 *regs;
4600
4601         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4602         if (!regs) {
4603                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4604                 return;
4605         }
4606
4607         if (tg3_flag(tp, PCI_EXPRESS)) {
4608                 /* Read up to but not including private PCI registers */
4609                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4610                         regs[i / sizeof(u32)] = tr32(i);
4611         } else
4612                 tg3_dump_legacy_regs(tp, regs);
4613
4614         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4615                 if (!regs[i + 0] && !regs[i + 1] &&
4616                     !regs[i + 2] && !regs[i + 3])
4617                         continue;
4618
4619                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4620                            i * 4,
4621                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4622         }
4623
4624         kfree(regs);
4625
4626         for (i = 0; i < tp->irq_cnt; i++) {
4627                 struct tg3_napi *tnapi = &tp->napi[i];
4628
4629                 /* SW status block */
4630                 netdev_err(tp->dev,
4631                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4632                            i,
4633                            tnapi->hw_status->status,
4634                            tnapi->hw_status->status_tag,
4635                            tnapi->hw_status->rx_jumbo_consumer,
4636                            tnapi->hw_status->rx_consumer,
4637                            tnapi->hw_status->rx_mini_consumer,
4638                            tnapi->hw_status->idx[0].rx_producer,
4639                            tnapi->hw_status->idx[0].tx_consumer);
4640
4641                 netdev_err(tp->dev,
4642                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4643                            i,
4644                            tnapi->last_tag, tnapi->last_irq_tag,
4645                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4646                            tnapi->rx_rcb_ptr,
4647                            tnapi->prodring.rx_std_prod_idx,
4648                            tnapi->prodring.rx_std_cons_idx,
4649                            tnapi->prodring.rx_jmb_prod_idx,
4650                            tnapi->prodring.rx_jmb_cons_idx);
4651         }
4652 }
4653
4654 /* This is called whenever we suspect that the system chipset is re-
4655  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4656  * is bogus tx completions. We try to recover by setting the
4657  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4658  * in the workqueue.
4659  */
4660 static void tg3_tx_recover(struct tg3 *tp)
4661 {
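        /* If the write-reorder workaround is already engaged (flag set
         * or indirect mailbox writes in use), this path should be
         * unreachable.
         */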
4662         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4663                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4664
4665         netdev_warn(tp->dev,
4666                     "The system may be re-ordering memory-mapped I/O "
4667                     "cycles to the network device, attempting to recover. "
4668                     "Please report the problem to the driver maintainer "
4669                     "and include system chipset information.\n");
4670
4671         spin_lock(&tp->lock);
4672         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4673         spin_unlock(&tp->lock);
4674 }
4675
4676 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4677 {
4678         /* Tell compiler to fetch tx indices from memory. */
4679         barrier();
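        /* Free slots = ring budget minus in-flight descriptors; the
         * producer/consumer difference is taken modulo the ring size.
         */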
4680         return tnapi->tx_pending -
4681                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4682 }
4683
4684 /* Tigon3 never reports partial packet sends.  So we do not
4685  * need special logic to handle SKBs that have not had all
4686  * of their frags sent yet, like SunGEM does.
4687  */
4688 static void tg3_tx(struct tg3_napi *tnapi)
4689 {
4690         struct tg3 *tp = tnapi->tp;
4691         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4692         u32 sw_idx = tnapi->tx_cons;
4693         struct netdev_queue *txq;
4694         int index = tnapi - tp->napi;
4695
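        /* With TSS, vector 0 carries no TX ring, so TX queue numbering
         * is shifted down by one relative to the NAPI vector index.
         */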
4696         if (tg3_flag(tp, ENABLE_TSS))
4697                 index--;
4698
4699         txq = netdev_get_tx_queue(tp->dev, index);
4700
4701         while (sw_idx != hw_idx) {
4702                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4703                 struct sk_buff *skb = ri->skb;
4704                 int i, tx_bug = 0;
4705
4706                 if (unlikely(skb == NULL)) {
4707                         tg3_tx_recover(tp);
4708                         return;
4709                 }
4710
4711                 pci_unmap_single(tp->pdev,
4712                                  dma_unmap_addr(ri, mapping),
4713                                  skb_headlen(skb),
4714                                  PCI_DMA_TODEVICE);
4715
4716                 ri->skb = NULL;
4717
4718                 sw_idx = NEXT_TX(sw_idx);
4719
4720                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4721                         ri = &tnapi->tx_buffers[sw_idx];
4722                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4723                                 tx_bug = 1;
4724
4725                         pci_unmap_page(tp->pdev,
4726                                        dma_unmap_addr(ri, mapping),
4727                                        skb_shinfo(skb)->frags[i].size,
4728                                        PCI_DMA_TODEVICE);
4729                         sw_idx = NEXT_TX(sw_idx);
4730                 }
4731
4732                 dev_kfree_skb(skb);
4733
4734                 if (unlikely(tx_bug)) {
4735                         tg3_tx_recover(tp);
4736                         return;
4737                 }
4738         }
4739
4740         tnapi->tx_cons = sw_idx;
4741
4742         /* Need to make the tx_cons update visible to tg3_start_xmit()
4743          * before checking for netif_queue_stopped().  Without the
4744          * memory barrier, there is a small possibility that tg3_start_xmit()
4745          * will miss it and cause the queue to be stopped forever.
4746          */
4747         smp_mb();
4748
4749         if (unlikely(netif_tx_queue_stopped(txq) &&
4750                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4751                 __netif_tx_lock(txq, smp_processor_id());
4752                 if (netif_tx_queue_stopped(txq) &&
4753                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4754                         netif_tx_wake_queue(txq);
4755                 __netif_tx_unlock(txq);
4756         }
4757 }
4758
4759 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4760 {
4761         if (!ri->skb)
4762                 return;
4763
4764         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4765                          map_sz, PCI_DMA_FROMDEVICE);
4766         dev_kfree_skb_any(ri->skb);
4767         ri->skb = NULL;
4768 }
4769
4770 /* Returns size of skb allocated or < 0 on error.
4771  *
4772  * We only need to fill in the address because the other members
4773  * of the RX descriptor are invariant, see tg3_init_rings.
4774  *
4775  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4776  * posting buffers we only dirty the first cache line of the RX
4777  * descriptor (containing the address), whereas for the RX status
4778  * buffers the cpu only reads the last cacheline of the RX descriptor
4779  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4780  */
4781 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4782                             u32 opaque_key, u32 dest_idx_unmasked)
4783 {
4784         struct tg3_rx_buffer_desc *desc;
4785         struct ring_info *map;
4786         struct sk_buff *skb;
4787         dma_addr_t mapping;
4788         int skb_size, dest_idx;
4789
4790         switch (opaque_key) {
4791         case RXD_OPAQUE_RING_STD:
4792                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4793                 desc = &tpr->rx_std[dest_idx];
4794                 map = &tpr->rx_std_buffers[dest_idx];
4795                 skb_size = tp->rx_pkt_map_sz;
4796                 break;
4797
4798         case RXD_OPAQUE_RING_JUMBO:
4799                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4800                 desc = &tpr->rx_jmb[dest_idx].std;
4801                 map = &tpr->rx_jmb_buffers[dest_idx];
4802                 skb_size = TG3_RX_JMB_MAP_SZ;
4803                 break;
4804
4805         default:
4806                 return -EINVAL;
4807         }
4808
4809         /* Do not overwrite any of the map or ring descriptor
4810          * information until we are sure we can commit to a new buffer.
4811          *
4812          * Callers depend upon this behavior and assume that
4813          * we leave everything unchanged if we fail.
4814          */
4815         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4816         if (skb == NULL)
4817                 return -ENOMEM;
4818
4819         skb_reserve(skb, tp->rx_offset);
4820
4821         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4822                                  PCI_DMA_FROMDEVICE);
4823         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4824                 dev_kfree_skb(skb);
4825                 return -EIO;
4826         }
4827
4828         map->skb = skb;
4829         dma_unmap_addr_set(map, mapping, mapping);
4830
4831         desc->addr_hi = ((u64)mapping >> 32);
4832         desc->addr_lo = ((u64)mapping & 0xffffffff);
4833
4834         return skb_size;
4835 }
4836
4837 /* We only need to move the address over because the other
4838  * members of the RX descriptor are invariant.  See notes above
4839  * tg3_alloc_rx_skb for full details.
4840  */
4841 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4842                            struct tg3_rx_prodring_set *dpr,
4843                            u32 opaque_key, int src_idx,
4844                            u32 dest_idx_unmasked)
4845 {
4846         struct tg3 *tp = tnapi->tp;
4847         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4848         struct ring_info *src_map, *dest_map;
4849         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4850         int dest_idx;
4851
4852         switch (opaque_key) {
4853         case RXD_OPAQUE_RING_STD:
4854                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4855                 dest_desc = &dpr->rx_std[dest_idx];
4856                 dest_map = &dpr->rx_std_buffers[dest_idx];
4857                 src_desc = &spr->rx_std[src_idx];
4858                 src_map = &spr->rx_std_buffers[src_idx];
4859                 break;
4860
4861         case RXD_OPAQUE_RING_JUMBO:
4862                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4863                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4864                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4865                 src_desc = &spr->rx_jmb[src_idx].std;
4866                 src_map = &spr->rx_jmb_buffers[src_idx];
4867                 break;
4868
4869         default:
4870                 return;
4871         }
4872
4873         dest_map->skb = src_map->skb;
4874         dma_unmap_addr_set(dest_map, mapping,
4875                            dma_unmap_addr(src_map, mapping));
4876         dest_desc->addr_hi = src_desc->addr_hi;
4877         dest_desc->addr_lo = src_desc->addr_lo;
4878
4879         /* Ensure that the update to the skb happens after the physical
4880          * addresses have been transferred to the new BD location.
4881          */
4882         smp_wmb();
4883
4884         src_map->skb = NULL;
4885 }
4886
4887 /* The RX ring scheme is composed of multiple rings which post fresh
4888  * buffers to the chip, and one special ring the chip uses to report
4889  * status back to the host.
4890  *
4891  * The special ring reports the status of received packets to the
4892  * host.  The chip does not write into the original descriptor the
4893  * RX buffer was obtained from.  The chip simply takes the original
4894  * descriptor as provided by the host, updates the status and length
4895  * fields, then writes this into the next status ring entry.
4896  *
4897  * Each ring the host uses to post buffers to the chip is described
4898  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4899  * it is first placed into the on-chip ram.  When the packet's length
4900  * is known, it walks down the TG3_BDINFO entries to select the ring.
4901  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4902  * which is within the range of the new packet's length is chosen.
4903  *
4904  * The "separate ring for rx status" scheme may sound queer, but it makes
4905  * sense from a cache coherency perspective.  If only the host writes
4906  * to the buffer post rings, and only the chip writes to the rx status
4907  * rings, then cache lines never move beyond shared-modified state.
4908  * If both the host and chip were to write into the same ring, cache line
4909  * eviction could occur since both entities want it in an exclusive state.
4910  */
4911 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4912 {
4913         struct tg3 *tp = tnapi->tp;
4914         u32 work_mask, rx_std_posted = 0;
4915         u32 std_prod_idx, jmb_prod_idx;
4916         u32 sw_idx = tnapi->rx_rcb_ptr;
4917         u16 hw_idx;
4918         int received;
4919         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4920
4921         hw_idx = *(tnapi->rx_rcb_prod_idx);
4922         /*
4923          * We need to order the read of hw_idx and the read of
4924          * the opaque cookie.
4925          */
4926         rmb();
4927         work_mask = 0;
4928         received = 0;
4929         std_prod_idx = tpr->rx_std_prod_idx;
4930         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4931         while (sw_idx != hw_idx && budget > 0) {
4932                 struct ring_info *ri;
4933                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4934                 unsigned int len;
4935                 struct sk_buff *skb;
4936                 dma_addr_t dma_addr;
4937                 u32 opaque_key, desc_idx, *post_ptr;
4938
4939                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4940                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4941                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4942                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4943                         dma_addr = dma_unmap_addr(ri, mapping);
4944                         skb = ri->skb;
4945                         post_ptr = &std_prod_idx;
4946                         rx_std_posted++;
4947                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4948                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4949                         dma_addr = dma_unmap_addr(ri, mapping);
4950                         skb = ri->skb;
4951                         post_ptr = &jmb_prod_idx;
4952                 } else
4953                         goto next_pkt_nopost;
4954
4955                 work_mask |= opaque_key;
4956
4957                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4958                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4959                 drop_it:
4960                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4961                                        desc_idx, *post_ptr);
4962                 drop_it_no_recycle:
4963                         /* The card keeps track of the other statistics. */
4964                         tp->rx_dropped++;
4965                         goto next_pkt;
4966                 }
4967
4968                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4969                       ETH_FCS_LEN;
4970
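                /* Large packets are handed straight up the stack and a
                 * fresh buffer is posted in their place; small packets
                 * are copied into a new skb so the original buffer can
                 * be recycled back into the producer ring.
                 */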
4971                 if (len > TG3_RX_COPY_THRESH(tp)) {
4972                         int skb_size;
4973
4974                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4975                                                     *post_ptr);
4976                         if (skb_size < 0)
4977                                 goto drop_it;
4978
4979                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4980                                          PCI_DMA_FROMDEVICE);
4981
4982                         /* Ensure that the update to the skb happens
4983                          * after the usage of the old DMA mapping.
4984                          */
4985                         smp_wmb();
4986
4987                         ri->skb = NULL;
4988
4989                         skb_put(skb, len);
4990                 } else {
4991                         struct sk_buff *copy_skb;
4992
4993                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4994                                        desc_idx, *post_ptr);
4995
4996                         copy_skb = netdev_alloc_skb(tp->dev, len +
4997                                                     TG3_RAW_IP_ALIGN);
4998                         if (copy_skb == NULL)
4999                                 goto drop_it_no_recycle;
5000
5001                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5002                         skb_put(copy_skb, len);
5003                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
                                                         len, PCI_DMA_FROMDEVICE);
5004                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5005                         pci_dma_sync_single_for_device(tp->pdev, dma_addr,
                                                            len, PCI_DMA_FROMDEVICE);
5006
5007                         /* We'll reuse the original ring buffer. */
5008                         skb = copy_skb;
5009                 }
5010
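                /* The chip reports the computed TCP/UDP checksum here;
                 * a value of 0xffff indicates the packet checksummed
                 * correctly.
                 */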
5011                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5012                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5013                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5014                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5015                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5016                 else
5017                         skb_checksum_none_assert(skb);
5018
5019                 skb->protocol = eth_type_trans(skb, tp->dev);
5020
5021                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5022                     skb->protocol != htons(ETH_P_8021Q)) {
5023                         dev_kfree_skb(skb);
5024                         goto drop_it_no_recycle;
5025                 }
5026
5027                 if (desc->type_flags & RXD_FLAG_VLAN &&
5028                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5029                         __vlan_hwaccel_put_tag(skb,
5030                                                desc->err_vlan & RXD_VLAN_MASK);
5031
5032                 napi_gro_receive(&tnapi->napi, skb);
5033
5034                 received++;
5035                 budget--;
5036
5037 next_pkt:
5038                 (*post_ptr)++;
5039
5040                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5041                         tpr->rx_std_prod_idx = std_prod_idx &
5042                                                tp->rx_std_ring_mask;
5043                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5044                                      tpr->rx_std_prod_idx);
5045                         work_mask &= ~RXD_OPAQUE_RING_STD;
5046                         rx_std_posted = 0;
5047                 }
5048 next_pkt_nopost:
5049                 sw_idx++;
5050                 sw_idx &= tp->rx_ret_ring_mask;
5051
5052                 /* Refresh hw_idx to see if there is new work */
5053                 if (sw_idx == hw_idx) {
5054                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5055                         rmb();
5056                 }
5057         }
5058
5059         /* ACK the status ring. */
5060         tnapi->rx_rcb_ptr = sw_idx;
5061         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5062
5063         /* Refill RX ring(s). */
5064         if (!tg3_flag(tp, ENABLE_RSS)) {
5065                 if (work_mask & RXD_OPAQUE_RING_STD) {
5066                         tpr->rx_std_prod_idx = std_prod_idx &
5067                                                tp->rx_std_ring_mask;
5068                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5069                                      tpr->rx_std_prod_idx);
5070                 }
5071                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5072                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5073                                                tp->rx_jmb_ring_mask;
5074                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5075                                      tpr->rx_jmb_prod_idx);
5076                 }
5077                 mmiowb();
5078         } else if (work_mask) {
5079                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5080                  * updated before the producer indices can be updated.
5081                  */
5082                 smp_wmb();
5083
5084                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5085                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5086
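                /* Vector 1 drains the per-vector producer rings back
                 * into the master ring (see tg3_poll_work); kick it if
                 * we are not that vector.
                 */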
5087                 if (tnapi != &tp->napi[1])
5088                         napi_schedule(&tp->napi[1].napi);
5089         }
5090
5091         return received;
5092 }
5093
5094 static void tg3_poll_link(struct tg3 *tp)
5095 {
5096         /* handle link change and other phy events */
5097         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5098                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5099
5100                 if (sblk->status & SD_STATUS_LINK_CHG) {
5101                         sblk->status = SD_STATUS_UPDATED |
5102                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5103                         spin_lock(&tp->lock);
5104                         if (tg3_flag(tp, USE_PHYLIB)) {
5105                                 tw32_f(MAC_STATUS,
5106                                      (MAC_STATUS_SYNC_CHANGED |
5107                                       MAC_STATUS_CFG_CHANGED |
5108                                       MAC_STATUS_MI_COMPLETION |
5109                                       MAC_STATUS_LNKSTATE_CHANGED));
5110                                 udelay(40);
5111                         } else
5112                                 tg3_setup_phy(tp, 0);
5113                         spin_unlock(&tp->lock);
5114                 }
5115         }
5116 }
5117
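/* Drain buffers posted on a per-vector producer ring (spr) into the
 * master producer ring (dpr), copying both the ring_info bookkeeping
 * and the descriptor addresses.  Returns -ENOSPC when destination
 * slots are still occupied; the caller then forces another interrupt
 * to retry.
 */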
5118 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5119                                 struct tg3_rx_prodring_set *dpr,
5120                                 struct tg3_rx_prodring_set *spr)
5121 {
5122         u32 si, di, cpycnt, src_prod_idx;
5123         int i, err = 0;
5124
5125         while (1) {
5126                 src_prod_idx = spr->rx_std_prod_idx;
5127
5128                 /* Make sure updates to the rx_std_buffers[] entries and the
5129                  * standard producer index are seen in the correct order.
5130                  */
5131                 smp_rmb();
5132
5133                 if (spr->rx_std_cons_idx == src_prod_idx)
5134                         break;
5135
5136                 if (spr->rx_std_cons_idx < src_prod_idx)
5137                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5138                 else
5139                         cpycnt = tp->rx_std_ring_mask + 1 -
5140                                  spr->rx_std_cons_idx;
5141
5142                 cpycnt = min(cpycnt,
5143                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5144
5145                 si = spr->rx_std_cons_idx;
5146                 di = dpr->rx_std_prod_idx;
5147
5148                 for (i = di; i < di + cpycnt; i++) {
5149                         if (dpr->rx_std_buffers[i].skb) {
5150                                 cpycnt = i - di;
5151                                 err = -ENOSPC;
5152                                 break;
5153                         }
5154                 }
5155
5156                 if (!cpycnt)
5157                         break;
5158
5159                 /* Ensure that updates to the rx_std_buffers ring and the
5160                  * shadowed hardware producer ring from tg3_recycle_skb() are
5161                  * ordered correctly WRT the skb check above.
5162                  */
5163                 smp_rmb();
5164
5165                 memcpy(&dpr->rx_std_buffers[di],
5166                        &spr->rx_std_buffers[si],
5167                        cpycnt * sizeof(struct ring_info));
5168
5169                 for (i = 0; i < cpycnt; i++, di++, si++) {
5170                         struct tg3_rx_buffer_desc *sbd, *dbd;
5171                         sbd = &spr->rx_std[si];
5172                         dbd = &dpr->rx_std[di];
5173                         dbd->addr_hi = sbd->addr_hi;
5174                         dbd->addr_lo = sbd->addr_lo;
5175                 }
5176
5177                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5178                                        tp->rx_std_ring_mask;
5179                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5180                                        tp->rx_std_ring_mask;
5181         }
5182
5183         while (1) {
5184                 src_prod_idx = spr->rx_jmb_prod_idx;
5185
5186                 /* Make sure updates to the rx_jmb_buffers[] entries and
5187                  * the jumbo producer index are seen in the correct order.
5188                  */
5189                 smp_rmb();
5190
5191                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5192                         break;
5193
5194                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5195                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5196                 else
5197                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5198                                  spr->rx_jmb_cons_idx;
5199
5200                 cpycnt = min(cpycnt,
5201                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5202
5203                 si = spr->rx_jmb_cons_idx;
5204                 di = dpr->rx_jmb_prod_idx;
5205
5206                 for (i = di; i < di + cpycnt; i++) {
5207                         if (dpr->rx_jmb_buffers[i].skb) {
5208                                 cpycnt = i - di;
5209                                 err = -ENOSPC;
5210                                 break;
5211                         }
5212                 }
5213
5214                 if (!cpycnt)
5215                         break;
5216
5217                 /* Ensure that updates to the rx_jmb_buffers ring and the
5218                  * shadowed hardware producer ring from tg3_recycle_skb() are
5219                  * ordered correctly WRT the skb check above.
5220                  */
5221                 smp_rmb();
5222
5223                 memcpy(&dpr->rx_jmb_buffers[di],
5224                        &spr->rx_jmb_buffers[si],
5225                        cpycnt * sizeof(struct ring_info));
5226
5227                 for (i = 0; i < cpycnt; i++, di++, si++) {
5228                         struct tg3_rx_buffer_desc *sbd, *dbd;
5229                         sbd = &spr->rx_jmb[si].std;
5230                         dbd = &dpr->rx_jmb[di].std;
5231                         dbd->addr_hi = sbd->addr_hi;
5232                         dbd->addr_lo = sbd->addr_lo;
5233                 }
5234
5235                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5236                                        tp->rx_jmb_ring_mask;
5237                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5238                                        tp->rx_jmb_ring_mask;
5239         }
5240
5241         return err;
5242 }
5243
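/* Per-vector work loop shared by tg3_poll() and tg3_poll_msix(): reap
 * TX completions, receive packets within the NAPI budget and, on RSS
 * vector 1, funnel every per-vector producer ring back to vector 0
 * before ringing the hardware producer mailboxes.
 */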
5244 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5245 {
5246         struct tg3 *tp = tnapi->tp;
5247
5248         /* run TX completion thread */
5249         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5250                 tg3_tx(tnapi);
5251                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5252                         return work_done;
5253         }
5254
5255         /* run RX thread, within the bounds set by NAPI.
5256          * All RX "locking" is done by ensuring outside
5257          * code synchronizes with tg3->napi.poll()
5258          */
5259         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5260                 work_done += tg3_rx(tnapi, budget - work_done);
5261
5262         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5263                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5264                 int i, err = 0;
5265                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5266                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5267
5268                 for (i = 1; i < tp->irq_cnt; i++)
5269                         err |= tg3_rx_prodring_xfer(tp, dpr,
5270                                                     &tp->napi[i].prodring);
5271
5272                 wmb();
5273
5274                 if (std_prod_idx != dpr->rx_std_prod_idx)
5275                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5276                                      dpr->rx_std_prod_idx);
5277
5278                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5279                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5280                                      dpr->rx_jmb_prod_idx);
5281
5282                 mmiowb();
5283
5284                 if (err)
5285                         tw32_f(HOSTCC_MODE, tp->coal_now);
5286         }
5287
5288         return work_done;
5289 }
5290
5291 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5292 {
5293         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5294         struct tg3 *tp = tnapi->tp;
5295         int work_done = 0;
5296         struct tg3_hw_status *sblk = tnapi->hw_status;
5297
5298         while (1) {
5299                 work_done = tg3_poll_work(tnapi, work_done, budget);
5300
5301                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5302                         goto tx_recovery;
5303
5304                 if (unlikely(work_done >= budget))
5305                         break;
5306
5307                 /* tnapi->last_tag is used when re-enabling interrupts
5308                  * below to tell the hw how much work has been processed,
5309                  * so we must read it before checking for more work.
5310                  */
5311                 tnapi->last_tag = sblk->status_tag;
5312                 tnapi->last_irq_tag = tnapi->last_tag;
5313                 rmb();
5314
5315                 /* check for RX/TX work to do */
5316                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5317                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5318                         napi_complete(napi);
5319                         /* Reenable interrupts. */
5320                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5321                         mmiowb();
5322                         break;
5323                 }
5324         }
5325
5326         return work_done;
5327
5328 tx_recovery:
5329         /* work_done is guaranteed to be less than budget. */
5330         napi_complete(napi);
5331         schedule_work(&tp->reset_task);
5332         return work_done;
5333 }
5334
5335 static void tg3_process_error(struct tg3 *tp)
5336 {
5337         u32 val;
5338         bool real_error = false;
5339
5340         if (tg3_flag(tp, ERROR_PROCESSED))
5341                 return;
5342
5343         /* Check Flow Attention register */
5344         val = tr32(HOSTCC_FLOW_ATTN);
5345         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5346                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5347                 real_error = true;
5348         }
5349
5350         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5351                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5352                 real_error = true;
5353         }
5354
5355         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5356                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5357                 real_error = true;
5358         }
5359
5360         if (!real_error)
5361                 return;
5362
5363         tg3_dump_state(tp);
5364
5365         tg3_flag_set(tp, ERROR_PROCESSED);
5366         schedule_work(&tp->reset_task);
5367 }
5368
5369 static int tg3_poll(struct napi_struct *napi, int budget)
5370 {
5371         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5372         struct tg3 *tp = tnapi->tp;
5373         int work_done = 0;
5374         struct tg3_hw_status *sblk = tnapi->hw_status;
5375
5376         while (1) {
5377                 if (sblk->status & SD_STATUS_ERROR)
5378                         tg3_process_error(tp);
5379
5380                 tg3_poll_link(tp);
5381
5382                 work_done = tg3_poll_work(tnapi, work_done, budget);
5383
5384                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5385                         goto tx_recovery;
5386
5387                 if (unlikely(work_done >= budget))
5388                         break;
5389
5390                 if (tg3_flag(tp, TAGGED_STATUS)) {
5391                         /* tnapi->last_tag is used in tg3_int_reenable() below
5392                          * to tell the hw how much work has been processed,
5393                          * so we must read it before checking for more work.
5394                          */
5395                         tnapi->last_tag = sblk->status_tag;
5396                         tnapi->last_irq_tag = tnapi->last_tag;
5397                         rmb();
5398                 } else
5399                         sblk->status &= ~SD_STATUS_UPDATED;
5400
5401                 if (likely(!tg3_has_work(tnapi))) {
5402                         napi_complete(napi);
5403                         tg3_int_reenable(tnapi);
5404                         break;
5405                 }
5406         }
5407
5408         return work_done;
5409
5410 tx_recovery:
5411         /* work_done is guaranteed to be less than budget. */
5412         napi_complete(napi);
5413         schedule_work(&tp->reset_task);
5414         return work_done;
5415 }
5416
5417 static void tg3_napi_disable(struct tg3 *tp)
5418 {
5419         int i;
5420
5421         for (i = tp->irq_cnt - 1; i >= 0; i--)
5422                 napi_disable(&tp->napi[i].napi);
5423 }
5424
5425 static void tg3_napi_enable(struct tg3 *tp)
5426 {
5427         int i;
5428
5429         for (i = 0; i < tp->irq_cnt; i++)
5430                 napi_enable(&tp->napi[i].napi);
5431 }
5432
5433 static void tg3_napi_init(struct tg3 *tp)
5434 {
5435         int i;
5436
5437         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5438         for (i = 1; i < tp->irq_cnt; i++)
5439                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5440 }
5441
5442 static void tg3_napi_fini(struct tg3 *tp)
5443 {
5444         int i;
5445
5446         for (i = 0; i < tp->irq_cnt; i++)
5447                 netif_napi_del(&tp->napi[i].napi);
5448 }
5449
5450 static inline void tg3_netif_stop(struct tg3 *tp)
5451 {
5452         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5453         tg3_napi_disable(tp);
5454         netif_tx_disable(tp->dev);
5455 }
5456
5457 static inline void tg3_netif_start(struct tg3 *tp)
5458 {
5459         /* NOTE: unconditional netif_tx_wake_all_queues is only
5460          * appropriate so long as all callers are assured to
5461          * have free tx slots (such as after tg3_init_hw)
5462          */
5463         netif_tx_wake_all_queues(tp->dev);
5464
5465         tg3_napi_enable(tp);
5466         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5467         tg3_enable_ints(tp);
5468 }
5469
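/* Raise irq_sync so the interrupt handlers back off, then wait for any
 * handler already running on each vector to finish.  Called with
 * tp->lock held via tg3_full_lock().
 */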
5470 static void tg3_irq_quiesce(struct tg3 *tp)
5471 {
5472         int i;
5473
5474         BUG_ON(tp->irq_sync);
5475
5476         tp->irq_sync = 1;
5477         smp_mb();
5478
5479         for (i = 0; i < tp->irq_cnt; i++)
5480                 synchronize_irq(tp->napi[i].irq_vec);
5481 }
5482
5483 /* Fully shut down all tg3 driver activity elsewhere in the system.
5484  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
5485  * with this as well.  Most of the time, this is not necessary except
5486  * when shutting down the device.
5487  */
5488 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5489 {
5490         spin_lock_bh(&tp->lock);
5491         if (irq_sync)
5492                 tg3_irq_quiesce(tp);
5493 }
5494
5495 static inline void tg3_full_unlock(struct tg3 *tp)
5496 {
5497         spin_unlock_bh(&tp->lock);
5498 }
5499
5500 /* One-shot MSI handler - Chip automatically disables interrupt
5501  * after sending MSI so driver doesn't have to do it.
5502  */
5503 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5504 {
5505         struct tg3_napi *tnapi = dev_id;
5506         struct tg3 *tp = tnapi->tp;
5507
5508         prefetch(tnapi->hw_status);
5509         if (tnapi->rx_rcb)
5510                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5511
5512         if (likely(!tg3_irq_sync(tp)))
5513                 napi_schedule(&tnapi->napi);
5514
5515         return IRQ_HANDLED;
5516 }
5517
5518 /* MSI ISR - No need to check for interrupt sharing and no need to
5519  * flush status block and interrupt mailbox. PCI ordering rules
5520  * guarantee that MSI will arrive after the status block.
5521  */
5522 static irqreturn_t tg3_msi(int irq, void *dev_id)
5523 {
5524         struct tg3_napi *tnapi = dev_id;
5525         struct tg3 *tp = tnapi->tp;
5526
5527         prefetch(tnapi->hw_status);
5528         if (tnapi->rx_rcb)
5529                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5530         /*
5531          * Writing any value to intr-mbox-0 clears PCI INTA# and
5532          * chip-internal interrupt pending events.
5533          * Writing non-zero to intr-mbox-0 additionally tells the
5534          * NIC to stop sending us irqs, engaging "in-intr-handler"
5535          * event coalescing.
5536          */
5537         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5538         if (likely(!tg3_irq_sync(tp)))
5539                 napi_schedule(&tnapi->napi);
5540
5541         return IRQ_RETVAL(1);
5542 }
5543
5544 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5545 {
5546         struct tg3_napi *tnapi = dev_id;
5547         struct tg3 *tp = tnapi->tp;
5548         struct tg3_hw_status *sblk = tnapi->hw_status;
5549         unsigned int handled = 1;
5550
5551         /* In INTx mode, the interrupt can arrive at the CPU before the
5552          * status block write posted prior to the interrupt has reached
5553          * host memory.  Reading the PCI State register will confirm
5554          * whether the interrupt is ours and will flush the status block.
5555          */
5556         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5557                 if (tg3_flag(tp, CHIP_RESETTING) ||
5558                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5559                         handled = 0;
5560                         goto out;
5561                 }
5562         }
5563
5564         /*
5565          * Writing any value to intr-mbox-0 clears PCI INTA# and
5566          * chip-internal interrupt pending events.
5567          * Writing non-zero to intr-mbox-0 additionally tells the
5568          * NIC to stop sending us irqs, engaging "in-intr-handler"
5569          * event coalescing.
5570          *
5571          * Flush the mailbox to de-assert the IRQ immediately to prevent
5572          * spurious interrupts.  The flush impacts performance but
5573          * excessive spurious interrupts can be worse in some cases.
5574          */
5575         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5576         if (tg3_irq_sync(tp))
5577                 goto out;
5578         sblk->status &= ~SD_STATUS_UPDATED;
5579         if (likely(tg3_has_work(tnapi))) {
5580                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5581                 napi_schedule(&tnapi->napi);
5582         } else {
5583                 /* No work; shared interrupt perhaps?  Re-enable
5584                  * interrupts, and flush that PCI write.
5585                  */
5586                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5587                                0x00000000);
5588         }
5589 out:
5590         return IRQ_RETVAL(handled);
5591 }
5592
5593 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5594 {
5595         struct tg3_napi *tnapi = dev_id;
5596         struct tg3 *tp = tnapi->tp;
5597         struct tg3_hw_status *sblk = tnapi->hw_status;
5598         unsigned int handled = 1;
5599
5600         /* In INTx mode, it is possible for the interrupt to arrive at
5601          * the CPU before the status block that was posted prior to the
5602          * interrupt.  Reading the PCI State register will confirm whether
5603          * the interrupt is ours and will flush the status block.
5604          */
5605         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5606                 if (tg3_flag(tp, CHIP_RESETTING) ||
5607                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5608                         handled = 0;
5609                         goto out;
5610                 }
5611         }
5612
5613         /*
5614          * Writing any value to intr-mbox-0 clears PCI INTA# and
5615          * chip-internal interrupt pending events.
5616          * Writing non-zero to intr-mbox-0 additionally tells the
5617          * NIC to stop sending us irqs, engaging "in-intr-handler"
5618          * event coalescing.
5619          *
5620          * Flush the mailbox to de-assert the IRQ immediately to prevent
5621          * spurious interrupts.  The flush impacts performance but
5622          * excessive spurious interrupts can be worse in some cases.
5623          */
5624         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5625
5626         /*
5627          * In a shared interrupt configuration, sometimes other devices'
5628          * interrupts will scream.  We record the current status tag here
5629          * so that the above check can report that the screaming interrupts
5630          * are unhandled.  Eventually they will be silenced.
5631          */
5632         tnapi->last_irq_tag = sblk->status_tag;
5633
5634         if (tg3_irq_sync(tp))
5635                 goto out;
5636
5637         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5638
5639         napi_schedule(&tnapi->napi);
5640
5641 out:
5642         return IRQ_RETVAL(handled);
5643 }
5644
5645 /* ISR for interrupt test */
5646 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5647 {
5648         struct tg3_napi *tnapi = dev_id;
5649         struct tg3 *tp = tnapi->tp;
5650         struct tg3_hw_status *sblk = tnapi->hw_status;
5651
5652         if ((sblk->status & SD_STATUS_UPDATED) ||
5653             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5654                 tg3_disable_ints(tp);
5655                 return IRQ_RETVAL(1);
5656         }
5657         return IRQ_RETVAL(0);
5658 }
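
/* Illustrative sketch, not part of the driver: the test ISR above is only
 * useful installed temporarily, the way the driver's interrupt self-test
 * (later in this file) does it -- request the line, provoke an interrupt,
 * then check whether tg3_test_isr() claimed it.  Fragment assumes tp and
 * tnapi are in scope.
 */
#if 0
        err = request_irq(tnapi->irq_vec, tg3_test_isr, 0,
                          tp->dev->name, tnapi);
        /* ... trigger a test interrupt and wait ... */
        free_irq(tnapi->irq_vec, tnapi);
#endif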
5659
5660 static int tg3_init_hw(struct tg3 *, int);
5661 static int tg3_halt(struct tg3 *, int, int);
5662
5663 /* Restart hardware after configuration changes, self-test, etc.
5664  * Invoked with tp->lock held.
5665  */
5666 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5667         __releases(tp->lock)
5668         __acquires(tp->lock)
5669 {
5670         int err;
5671
5672         err = tg3_init_hw(tp, reset_phy);
5673         if (err) {
5674                 netdev_err(tp->dev,
5675                            "Failed to re-initialize device, aborting\n");
5676                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5677                 tg3_full_unlock(tp);
5678                 del_timer_sync(&tp->timer);
5679                 tp->irq_sync = 0;
5680                 tg3_napi_enable(tp);
5681                 dev_close(tp->dev);
5682                 tg3_full_lock(tp, 0);
5683         }
5684         return err;
5685 }
5686
5687 #ifdef CONFIG_NET_POLL_CONTROLLER
5688 static void tg3_poll_controller(struct net_device *dev)
5689 {
5690         int i;
5691         struct tg3 *tp = netdev_priv(dev);
5692
5693         for (i = 0; i < tp->irq_cnt; i++)
5694                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5695 }
5696 #endif
5697
5698 static void tg3_reset_task(struct work_struct *work)
5699 {
5700         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5701         int err;
5702         unsigned int restart_timer;
5703
5704         tg3_full_lock(tp, 0);
5705
5706         if (!netif_running(tp->dev)) {
5707                 tg3_full_unlock(tp);
5708                 return;
5709         }
5710
5711         tg3_full_unlock(tp);
5712
5713         tg3_phy_stop(tp);
5714
5715         tg3_netif_stop(tp);
5716
5717         tg3_full_lock(tp, 1);
5718
5719         restart_timer = tg3_flag(tp, RESTART_TIMER);
5720         tg3_flag_clear(tp, RESTART_TIMER);
5721
5722         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5723                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5724                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5725                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5726                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5727         }
5728
5729         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5730         err = tg3_init_hw(tp, 1);
5731         if (err)
5732                 goto out;
5733
5734         tg3_netif_start(tp);
5735
5736         if (restart_timer)
5737                 mod_timer(&tp->timer, jiffies + 1);
5738
5739 out:
5740         tg3_full_unlock(tp);
5741
5742         if (!err)
5743                 tg3_phy_start(tp);
5744 }
5745
5746 static void tg3_tx_timeout(struct net_device *dev)
5747 {
5748         struct tg3 *tp = netdev_priv(dev);
5749
5750         if (netif_msg_tx_err(tp)) {
5751                 netdev_err(dev, "transmit timed out, resetting\n");
5752                 tg3_dump_state(tp);
5753         }
5754
5755         schedule_work(&tp->reset_task);
5756 }
5757
5758 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5759 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5760 {
5761         u32 base = (u32) mapping & 0xffffffff;
5762
5763         return (base > 0xffffdcc0) && (base + len + 8 < base);
5764 }
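
/* Illustrative worked example, not part of the driver: with mapping
 * 0xfffffff0 and len 32, base + len + 8 = 0x100000018, which wraps to 0x18
 * in 32-bit arithmetic -- smaller than base, so a 4GB crossing is detected.
 * The 0xffffdcc0 guard (0x2340 = 9024 bytes below the boundary, roughly one
 * jumbo frame) cheaply rules out buffers that cannot reach the boundary;
 * the 8 bytes of slack are presumably headroom for the hardware workaround.
 */
#if 0
static int tg3_example_4g_check(void)
{
        u32 base = 0xfffffff0;  /* 16 bytes below a 4GB boundary */
        int len = 32;

        /* wraps: 0xfffffff0 + 40 == 0x18 modulo 2^32, so this is true */
        return (base > 0xffffdcc0) && (base + len + 8 < base);
}
#endif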
5765
5766 /* Test for DMA addresses > 40-bit */
5767 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5768                                           int len)
5769 {
5770 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5771         if (tg3_flag(tp, 40BIT_DMA_BUG))
5772                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5773         return 0;
5774 #else
5775         return 0;
5776 #endif
5777 }
5778
5779 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5780                         dma_addr_t mapping, int len, u32 flags,
5781                         u32 mss_and_is_end)
5782 {
5783         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5784         int is_end = (mss_and_is_end & 0x1);
5785         u32 mss = (mss_and_is_end >> 1);
5786         u32 vlan_tag = 0;
5787
5788         if (is_end)
5789                 flags |= TXD_FLAG_END;
5790         if (flags & TXD_FLAG_VLAN) {
5791                 vlan_tag = flags >> 16;
5792                 flags &= 0xffff;
5793         }
5794         vlan_tag |= (mss << TXD_MSS_SHIFT);
5795
5796         txd->addr_hi = ((u64) mapping >> 32);
5797         txd->addr_lo = ((u64) mapping & 0xffffffff);
5798         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5799         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5800 }
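
/* Illustrative sketch, not part of the driver: callers fold the MSS and the
 * end-of-packet marker into the single mss_and_is_end argument unpacked
 * above.  For the final descriptor of a TSO packet with an MSS of 1448
 * (values chosen purely for illustration):
 */
#if 0
        tg3_set_txd(tnapi, entry, mapping, len, base_flags,
                    1 | (1448 << 1));   /* is_end = 1, mss = 1448 */
#endif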
5801
5802 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5803                                 struct sk_buff *skb, int last)
5804 {
5805         int i;
5806         u32 entry = tnapi->tx_prod;
5807         struct ring_info *txb = &tnapi->tx_buffers[entry];
5808
5809         pci_unmap_single(tnapi->tp->pdev,
5810                          dma_unmap_addr(txb, mapping),
5811                          skb_headlen(skb),
5812                          PCI_DMA_TODEVICE);
5813         for (i = 0; i < last; i++) {
5814                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5815
5816                 entry = NEXT_TX(entry);
5817                 txb = &tnapi->tx_buffers[entry];
5818
5819                 pci_unmap_page(tnapi->tp->pdev,
5820                                dma_unmap_addr(txb, mapping),
5821                                frag->size, PCI_DMA_TODEVICE);
5822         }
5823 }
5824
5825 /* Work around 4GB and 40-bit hardware DMA bugs. */
5826 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5827                                        struct sk_buff *skb,
5828                                        u32 base_flags, u32 mss)
5829 {
5830         struct tg3 *tp = tnapi->tp;
5831         struct sk_buff *new_skb;
5832         dma_addr_t new_addr = 0;
5833         u32 entry = tnapi->tx_prod;
5834         int ret = 0;
5835
5836         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5837                 new_skb = skb_copy(skb, GFP_ATOMIC);
5838         else {
5839                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5840
5841                 new_skb = skb_copy_expand(skb,
5842                                           skb_headroom(skb) + more_headroom,
5843                                           skb_tailroom(skb), GFP_ATOMIC);
5844         }
5845
5846         if (!new_skb) {
5847                 ret = -1;
5848         } else {
5849                 /* New SKB is guaranteed to be linear. */
5850                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5851                                           PCI_DMA_TODEVICE);
5852                 /* Make sure the mapping succeeded */
5853                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5854                         ret = -1;
5855                         dev_kfree_skb(new_skb);
5856
5857                 /* Make sure new skb does not cross any 4G boundaries.
5858                  * Drop the packet if it does.
5859                  */
5860                 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5861                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5862                                          PCI_DMA_TODEVICE);
5863                         ret = -1;
5864                         dev_kfree_skb(new_skb);
5865                 } else {
5866                         tnapi->tx_buffers[entry].skb = new_skb;
5867                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5868                                            mapping, new_addr);
5869
5870                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5871                                     base_flags, 1 | (mss << 1));
5872                 }
5873         }
5874
5875         dev_kfree_skb(skb);
5876
5877         return ret;
5878 }
5879
5880 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5881
5882 /* Use GSO to work around a rare TSO bug that may be triggered when the
5883  * TSO header is greater than 80 bytes.
5884  */
5885 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5886 {
5887         struct sk_buff *segs, *nskb;
5888         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5889
5890         /* Estimate the number of fragments in the worst case */
5891         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5892                 netif_stop_queue(tp->dev);
5893
5894                 /* netif_tx_stop_queue() must be done before checking
5895                  * tx index in tg3_tx_avail() below, because in
5896                  * tg3_tx(), we update tx index before checking for
5897                  * netif_tx_queue_stopped().
5898                  */
5899                 smp_mb();
5900                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5901                         return NETDEV_TX_BUSY;
5902
5903                 netif_wake_queue(tp->dev);
5904         }
5905
5906         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5907         if (IS_ERR(segs))
5908                 goto tg3_tso_bug_end;
5909
5910         do {
5911                 nskb = segs;
5912                 segs = segs->next;
5913                 nskb->next = NULL;
5914                 tg3_start_xmit(nskb, tp->dev);
5915         } while (segs);
5916
5917 tg3_tso_bug_end:
5918         dev_kfree_skb(skb);
5919
5920         return NETDEV_TX_OK;
5921 }
5922
5923 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5924  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5925  */
5926 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5927 {
5928         struct tg3 *tp = netdev_priv(dev);
5929         u32 len, entry, base_flags, mss;
5930         int i = -1, would_hit_hwbug;
5931         dma_addr_t mapping;
5932         struct tg3_napi *tnapi;
5933         struct netdev_queue *txq;
5934         unsigned int last;
5935
5936         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5937         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5938         if (tg3_flag(tp, ENABLE_TSS))
5939                 tnapi++;
5940
5941         /* We are running in a BH-disabled context with netif_tx_lock
5942          * and TX reclaim runs via tp->napi.poll inside of a software
5943          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5944          * no IRQ context deadlocks to worry about either.  Rejoice!
5945          */
5946         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5947                 if (!netif_tx_queue_stopped(txq)) {
5948                         netif_tx_stop_queue(txq);
5949
5950                         /* This is a hard error, log it. */
5951                         netdev_err(dev,
5952                                    "BUG! Tx Ring full when queue awake!\n");
5953                 }
5954                 return NETDEV_TX_BUSY;
5955         }
5956
5957         entry = tnapi->tx_prod;
5958         base_flags = 0;
5959         if (skb->ip_summed == CHECKSUM_PARTIAL)
5960                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5961
5962         mss = skb_shinfo(skb)->gso_size;
5963         if (mss) {
5964                 struct iphdr *iph;
5965                 u32 tcp_opt_len, hdr_len;
5966
5967                 if (skb_header_cloned(skb) &&
5968                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5969                         dev_kfree_skb(skb);
5970                         goto out_unlock;
5971                 }
5972
5973                 iph = ip_hdr(skb);
5974                 tcp_opt_len = tcp_optlen(skb);
5975
5976                 if (skb_is_gso_v6(skb)) {
5977                         hdr_len = skb_headlen(skb) - ETH_HLEN;
5978                 } else {
5979                         u32 ip_tcp_len;
5980
5981                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5982                         hdr_len = ip_tcp_len + tcp_opt_len;
5983
5984                         iph->check = 0;
5985                         iph->tot_len = htons(mss + hdr_len);
5986                 }
5987
5988                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5989                     tg3_flag(tp, TSO_BUG))
5990                         return tg3_tso_bug(tp, skb);
5991
5992                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5993                                TXD_FLAG_CPU_POST_DMA);
5994
5995                 if (tg3_flag(tp, HW_TSO_1) ||
5996                     tg3_flag(tp, HW_TSO_2) ||
5997                     tg3_flag(tp, HW_TSO_3)) {
5998                         tcp_hdr(skb)->check = 0;
5999                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6000                 } else
6001                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6002                                                                  iph->daddr, 0,
6003                                                                  IPPROTO_TCP,
6004                                                                  0);
6005
6006                 if (tg3_flag(tp, HW_TSO_3)) {
6007                         mss |= (hdr_len & 0xc) << 12;
6008                         if (hdr_len & 0x10)
6009                                 base_flags |= 0x00000010;
6010                         base_flags |= (hdr_len & 0x3e0) << 5;
6011                 } else if (tg3_flag(tp, HW_TSO_2))
6012                         mss |= hdr_len << 9;
6013                 else if (tg3_flag(tp, HW_TSO_1) ||
6014                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6015                         if (tcp_opt_len || iph->ihl > 5) {
6016                                 int tsflags;
6017
6018                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6019                                 mss |= (tsflags << 11);
6020                         }
6021                 } else {
6022                         if (tcp_opt_len || iph->ihl > 5) {
6023                                 int tsflags;
6024
6025                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6026                                 base_flags |= tsflags << 12;
6027                         }
6028                 }
6029         }
6030
6031         if (vlan_tx_tag_present(skb))
6032                 base_flags |= (TXD_FLAG_VLAN |
6033                                (vlan_tx_tag_get(skb) << 16));
6034
6035         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6036             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6037                 base_flags |= TXD_FLAG_JMB_PKT;
6038
6039         len = skb_headlen(skb);
6040
6041         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6042         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6043                 dev_kfree_skb(skb);
6044                 goto out_unlock;
6045         }
6046
6047         tnapi->tx_buffers[entry].skb = skb;
6048         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6049
6050         would_hit_hwbug = 0;
6051
6052         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6053                 would_hit_hwbug = 1;
6054
6055         if (tg3_4g_overflow_test(mapping, len))
6056                 would_hit_hwbug = 1;
6057
6058         if (tg3_40bit_overflow_test(tp, mapping, len))
6059                 would_hit_hwbug = 1;
6060
6061         if (tg3_flag(tp, 5701_DMA_BUG))
6062                 would_hit_hwbug = 1;
6063
6064         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6065                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6066
6067         entry = NEXT_TX(entry);
6068
6069         /* Now loop through additional data fragments, and queue them. */
6070         if (skb_shinfo(skb)->nr_frags > 0) {
6071                 last = skb_shinfo(skb)->nr_frags - 1;
6072                 for (i = 0; i <= last; i++) {
6073                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6074
6075                         len = frag->size;
6076                         mapping = pci_map_page(tp->pdev,
6077                                                frag->page,
6078                                                frag->page_offset,
6079                                                len, PCI_DMA_TODEVICE);
6080
6081                         tnapi->tx_buffers[entry].skb = NULL;
6082                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6083                                            mapping);
6084                         if (pci_dma_mapping_error(tp->pdev, mapping))
6085                                 goto dma_error;
6086
6087                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6088                             len <= 8)
6089                                 would_hit_hwbug = 1;
6090
6091                         if (tg3_4g_overflow_test(mapping, len))
6092                                 would_hit_hwbug = 1;
6093
6094                         if (tg3_40bit_overflow_test(tp, mapping, len))
6095                                 would_hit_hwbug = 1;
6096
6097                         if (tg3_flag(tp, HW_TSO_1) ||
6098                             tg3_flag(tp, HW_TSO_2) ||
6099                             tg3_flag(tp, HW_TSO_3))
6100                                 tg3_set_txd(tnapi, entry, mapping, len,
6101                                             base_flags, (i == last)|(mss << 1));
6102                         else
6103                                 tg3_set_txd(tnapi, entry, mapping, len,
6104                                             base_flags, (i == last));
6105
6106                         entry = NEXT_TX(entry);
6107                 }
6108         }
6109
6110         if (would_hit_hwbug) {
6111                 tg3_skb_error_unmap(tnapi, skb, i);
6112
6113                 /* If the workaround fails due to memory/mapping
6114                  * failure, silently drop this packet.
6115                  */
6116                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6117                         goto out_unlock;
6118
6119                 entry = NEXT_TX(tnapi->tx_prod);
6120         }
6121
6122         skb_tx_timestamp(skb);
6123
6124         /* Packets are ready; update the Tx producer idx locally and on card. */
6125         tw32_tx_mbox(tnapi->prodmbox, entry);
6126
6127         tnapi->tx_prod = entry;
6128         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6129                 netif_tx_stop_queue(txq);
6130
6131                 /* netif_tx_stop_queue() must be done before checking
6132                  * tx index in tg3_tx_avail() below, because in
6133                  * tg3_tx(), we update tx index before checking for
6134                  * netif_tx_queue_stopped().
6135                  */
6136                 smp_mb();
6137                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6138                         netif_tx_wake_queue(txq);
6139         }
6140
6141 out_unlock:
6142         mmiowb();
6143
6144         return NETDEV_TX_OK;
6145
6146 dma_error:
6147         tg3_skb_error_unmap(tnapi, skb, i);
6148         dev_kfree_skb(skb);
6149         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6150         return NETDEV_TX_OK;
6151 }
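
/* Illustrative sketch, not part of the driver: the consumer half of the
 * stop/wake protocol used above, paraphrased from the tx reclaim path
 * earlier in the file (sw_idx is a hypothetical local).  Each side writes
 * first, issues smp_mb(), then reads the other side's state, so a wakeup
 * cannot be lost between the producer's space check and its queue stop.
 */
#if 0
        /* reclaim side, after freeing completed descriptors: */
        tnapi->tx_cons = sw_idx;
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))))
                netif_tx_wake_queue(txq);
#endif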
6152
6153 static void tg3_set_loopback(struct net_device *dev, u32 features)
6154 {
6155         struct tg3 *tp = netdev_priv(dev);
6156
6157         if (features & NETIF_F_LOOPBACK) {
6158                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6159                         return;
6160
6161                 /*
6162                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6163                  * loopback mode if Half-Duplex mode was negotiated earlier.
6164                  */
6165                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6166
6167                 /* Enable internal MAC loopback mode */
6168                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6169                 spin_lock_bh(&tp->lock);
6170                 tw32(MAC_MODE, tp->mac_mode);
6171                 netif_carrier_on(tp->dev);
6172                 spin_unlock_bh(&tp->lock);
6173                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6174         } else {
6175                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6176                         return;
6177
6178                 /* Disable internal MAC loopback mode */
6179                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6180                 spin_lock_bh(&tp->lock);
6181                 tw32(MAC_MODE, tp->mac_mode);
6182                 /* Force link status check */
6183                 tg3_setup_phy(tp, 1);
6184                 spin_unlock_bh(&tp->lock);
6185                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6186         }
6187 }
6188
6189 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6190 {
6191         struct tg3 *tp = netdev_priv(dev);
6192
6193         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6194                 features &= ~NETIF_F_ALL_TSO;
6195
6196         return features;
6197 }
6198
6199 static int tg3_set_features(struct net_device *dev, u32 features)
6200 {
6201         u32 changed = dev->features ^ features;
6202
6203         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6204                 tg3_set_loopback(dev, features);
6205
6206         return 0;
6207 }
6208
6209 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6210                                int new_mtu)
6211 {
6212         dev->mtu = new_mtu;
6213
6214         if (new_mtu > ETH_DATA_LEN) {
6215                 if (tg3_flag(tp, 5780_CLASS)) {
6216                         netdev_update_features(dev);
6217                         tg3_flag_clear(tp, TSO_CAPABLE);
6218                 } else {
6219                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6220                 }
6221         } else {
6222                 if (tg3_flag(tp, 5780_CLASS)) {
6223                         tg3_flag_set(tp, TSO_CAPABLE);
6224                         netdev_update_features(dev);
6225                 }
6226                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6227         }
6228 }
6229
6230 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6231 {
6232         struct tg3 *tp = netdev_priv(dev);
6233         int err;
6234
6235         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6236                 return -EINVAL;
6237
6238         if (!netif_running(dev)) {
6239                 /* We'll just catch it later when the
6240                  * device is brought up.
6241                  */
6242                 tg3_set_mtu(dev, tp, new_mtu);
6243                 return 0;
6244         }
6245
6246         tg3_phy_stop(tp);
6247
6248         tg3_netif_stop(tp);
6249
6250         tg3_full_lock(tp, 1);
6251
6252         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6253
6254         tg3_set_mtu(dev, tp, new_mtu);
6255
6256         err = tg3_restart_hw(tp, 0);
6257
6258         if (!err)
6259                 tg3_netif_start(tp);
6260
6261         tg3_full_unlock(tp);
6262
6263         if (!err)
6264                 tg3_phy_start(tp);
6265
6266         return err;
6267 }
6268
6269 static void tg3_rx_prodring_free(struct tg3 *tp,
6270                                  struct tg3_rx_prodring_set *tpr)
6271 {
6272         int i;
6273
6274         if (tpr != &tp->napi[0].prodring) {
6275                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6276                      i = (i + 1) & tp->rx_std_ring_mask)
6277                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6278                                         tp->rx_pkt_map_sz);
6279
6280                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6281                         for (i = tpr->rx_jmb_cons_idx;
6282                              i != tpr->rx_jmb_prod_idx;
6283                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6284                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6285                                                 TG3_RX_JMB_MAP_SZ);
6286                         }
6287                 }
6288
6289                 return;
6290         }
6291
6292         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6293                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6294                                 tp->rx_pkt_map_sz);
6295
6296         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6297                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6298                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6299                                         TG3_RX_JMB_MAP_SZ);
6300         }
6301 }
6302
6303 /* Initialize rx rings for packet processing.
6304  *
6305  * The chip has been shut down and the driver detached from
6306  * the networking, so no interrupts or new tx packets will
6307  * end up in the driver.  tp->{tx,}lock are held and thus
6308  * we may not sleep.
6309  */
6310 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6311                                  struct tg3_rx_prodring_set *tpr)
6312 {
6313         u32 i, rx_pkt_dma_sz;
6314
6315         tpr->rx_std_cons_idx = 0;
6316         tpr->rx_std_prod_idx = 0;
6317         tpr->rx_jmb_cons_idx = 0;
6318         tpr->rx_jmb_prod_idx = 0;
6319
6320         if (tpr != &tp->napi[0].prodring) {
6321                 memset(&tpr->rx_std_buffers[0], 0,
6322                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6323                 if (tpr->rx_jmb_buffers)
6324                         memset(&tpr->rx_jmb_buffers[0], 0,
6325                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6326                 goto done;
6327         }
6328
6329         /* Zero out all descriptors. */
6330         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6331
6332         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6333         if (tg3_flag(tp, 5780_CLASS) &&
6334             tp->dev->mtu > ETH_DATA_LEN)
6335                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6336         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6337
6338         /* Initialize invariants of the rings; we only set this
6339          * stuff once.  This works because the card does not
6340          * write into the rx buffer posting rings.
6341          */
6342         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6343                 struct tg3_rx_buffer_desc *rxd;
6344
6345                 rxd = &tpr->rx_std[i];
6346                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6347                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6348                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6349                                (i << RXD_OPAQUE_INDEX_SHIFT));
6350         }
6351
6352         /* Now allocate fresh SKBs for each rx ring. */
6353         for (i = 0; i < tp->rx_pending; i++) {
6354                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6355                         netdev_warn(tp->dev,
6356                                     "Using a smaller RX standard ring. Only "
6357                                     "%d out of %d buffers were allocated "
6358                                     "successfully\n", i, tp->rx_pending);
6359                         if (i == 0)
6360                                 goto initfail;
6361                         tp->rx_pending = i;
6362                         break;
6363                 }
6364         }
6365
6366         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6367                 goto done;
6368
6369         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6370
6371         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6372                 goto done;
6373
6374         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6375                 struct tg3_rx_buffer_desc *rxd;
6376
6377                 rxd = &tpr->rx_jmb[i].std;
6378                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6379                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6380                                   RXD_FLAG_JUMBO;
6381                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6382                        (i << RXD_OPAQUE_INDEX_SHIFT));
6383         }
6384
6385         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6386                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6387                         netdev_warn(tp->dev,
6388                                     "Using a smaller RX jumbo ring. Only %d "
6389                                     "out of %d buffers were allocated "
6390                                     "successfully\n", i, tp->rx_jumbo_pending);
6391                         if (i == 0)
6392                                 goto initfail;
6393                         tp->rx_jumbo_pending = i;
6394                         break;
6395                 }
6396         }
6397
6398 done:
6399         return 0;
6400
6401 initfail:
6402         tg3_rx_prodring_free(tp, tpr);
6403         return -ENOMEM;
6404 }
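
/* Illustrative note, not part of the driver: the opaque field programmed
 * above is echoed back by the chip in rx completion descriptors, which is
 * how the rx path (earlier in the file) recovers the producer ring and slot
 * without a lookup table.  A decode mirroring that path:
 */
#if 0
        u32 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
        u32 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
#endif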
6405
6406 static void tg3_rx_prodring_fini(struct tg3 *tp,
6407                                  struct tg3_rx_prodring_set *tpr)
6408 {
6409         kfree(tpr->rx_std_buffers);
6410         tpr->rx_std_buffers = NULL;
6411         kfree(tpr->rx_jmb_buffers);
6412         tpr->rx_jmb_buffers = NULL;
6413         if (tpr->rx_std) {
6414                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6415                                   tpr->rx_std, tpr->rx_std_mapping);
6416                 tpr->rx_std = NULL;
6417         }
6418         if (tpr->rx_jmb) {
6419                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6420                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6421                 tpr->rx_jmb = NULL;
6422         }
6423 }
6424
6425 static int tg3_rx_prodring_init(struct tg3 *tp,
6426                                 struct tg3_rx_prodring_set *tpr)
6427 {
6428         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6429                                       GFP_KERNEL);
6430         if (!tpr->rx_std_buffers)
6431                 return -ENOMEM;
6432
6433         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6434                                          TG3_RX_STD_RING_BYTES(tp),
6435                                          &tpr->rx_std_mapping,
6436                                          GFP_KERNEL);
6437         if (!tpr->rx_std)
6438                 goto err_out;
6439
6440         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6441                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6442                                               GFP_KERNEL);
6443                 if (!tpr->rx_jmb_buffers)
6444                         goto err_out;
6445
6446                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6447                                                  TG3_RX_JMB_RING_BYTES(tp),
6448                                                  &tpr->rx_jmb_mapping,
6449                                                  GFP_KERNEL);
6450                 if (!tpr->rx_jmb)
6451                         goto err_out;
6452         }
6453
6454         return 0;
6455
6456 err_out:
6457         tg3_rx_prodring_fini(tp, tpr);
6458         return -ENOMEM;
6459 }
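
/* Illustrative sketch, not part of the driver: the intended lifecycle of a
 * producer ring set.  _init() grabs the buffer arrays and coherent DMA
 * memory once, _alloc()/_free() populate and drain skbs across restarts,
 * and _fini() releases the memory at teardown.
 */
#if 0
        err = tg3_rx_prodring_init(tp, tpr);    /* once, when ifup begins */
        err = tg3_rx_prodring_alloc(tp, tpr);   /* on every (re)start */
        tg3_rx_prodring_free(tp, tpr);          /* on every stop */
        tg3_rx_prodring_fini(tp, tpr);          /* once, at teardown */
#endif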
6460
6461 /* Free up pending packets in all rx/tx rings.
6462  *
6463  * The chip has been shut down and the driver detached from
6464  * the network stack, so no interrupts or new tx packets will
6465  * end up in the driver.  tp->{tx,}lock is not held and we are not
6466  * in an interrupt context and thus may sleep.
6467  */
6468 static void tg3_free_rings(struct tg3 *tp)
6469 {
6470         int i, j;
6471
6472         for (j = 0; j < tp->irq_cnt; j++) {
6473                 struct tg3_napi *tnapi = &tp->napi[j];
6474
6475                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6476
6477                 if (!tnapi->tx_buffers)
6478                         continue;
6479
6480                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6481                         struct ring_info *txp;
6482                         struct sk_buff *skb;
6483                         unsigned int k;
6484
6485                         txp = &tnapi->tx_buffers[i];
6486                         skb = txp->skb;
6487
6488                         if (skb == NULL) {
6489                                 i++;
6490                                 continue;
6491                         }
6492
6493                         pci_unmap_single(tp->pdev,
6494                                          dma_unmap_addr(txp, mapping),
6495                                          skb_headlen(skb),
6496                                          PCI_DMA_TODEVICE);
6497                         txp->skb = NULL;
6498
6499                         i++;
6500
6501                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6502                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6503                                 pci_unmap_page(tp->pdev,
6504                                                dma_unmap_addr(txp, mapping),
6505                                                skb_shinfo(skb)->frags[k].size,
6506                                                PCI_DMA_TODEVICE);
6507                                 i++;
6508                         }
6509
6510                         dev_kfree_skb_any(skb);
6511                 }
6512         }
6513 }
6514
6515 /* Initialize tx/rx rings for packet processing.
6516  *
6517  * The chip has been shut down and the driver detached from
6518  * the network stack, so no interrupts or new tx packets will
6519  * end up in the driver.  tp->{tx,}lock are held and thus
6520  * we may not sleep.
6521  */
6522 static int tg3_init_rings(struct tg3 *tp)
6523 {
6524         int i;
6525
6526         /* Free up all the SKBs. */
6527         tg3_free_rings(tp);
6528
6529         for (i = 0; i < tp->irq_cnt; i++) {
6530                 struct tg3_napi *tnapi = &tp->napi[i];
6531
6532                 tnapi->last_tag = 0;
6533                 tnapi->last_irq_tag = 0;
6534                 tnapi->hw_status->status = 0;
6535                 tnapi->hw_status->status_tag = 0;
6536                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6537
6538                 tnapi->tx_prod = 0;
6539                 tnapi->tx_cons = 0;
6540                 if (tnapi->tx_ring)
6541                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6542
6543                 tnapi->rx_rcb_ptr = 0;
6544                 if (tnapi->rx_rcb)
6545                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6546
6547                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6548                         tg3_free_rings(tp);
6549                         return -ENOMEM;
6550                 }
6551         }
6552
6553         return 0;
6554 }
6555
6556 /*
6557  * Must not be invoked with interrupt sources disabled and
6558  * the hardware shut down.
6559  */
6560 static void tg3_free_consistent(struct tg3 *tp)
6561 {
6562         int i;
6563
6564         for (i = 0; i < tp->irq_cnt; i++) {
6565                 struct tg3_napi *tnapi = &tp->napi[i];
6566
6567                 if (tnapi->tx_ring) {
6568                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6569                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6570                         tnapi->tx_ring = NULL;
6571                 }
6572
6573                 kfree(tnapi->tx_buffers);
6574                 tnapi->tx_buffers = NULL;
6575
6576                 if (tnapi->rx_rcb) {
6577                         dma_free_coherent(&tp->pdev->dev,
6578                                           TG3_RX_RCB_RING_BYTES(tp),
6579                                           tnapi->rx_rcb,
6580                                           tnapi->rx_rcb_mapping);
6581                         tnapi->rx_rcb = NULL;
6582                 }
6583
6584                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6585
6586                 if (tnapi->hw_status) {
6587                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6588                                           tnapi->hw_status,
6589                                           tnapi->status_mapping);
6590                         tnapi->hw_status = NULL;
6591                 }
6592         }
6593
6594         if (tp->hw_stats) {
6595                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6596                                   tp->hw_stats, tp->stats_mapping);
6597                 tp->hw_stats = NULL;
6598         }
6599 }
6600
6601 /*
6602  * Must not be invoked with interrupt sources disabled and
6603  * the hardware shut down.  Can sleep.
6604  */
6605 static int tg3_alloc_consistent(struct tg3 *tp)
6606 {
6607         int i;
6608
6609         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6610                                           sizeof(struct tg3_hw_stats),
6611                                           &tp->stats_mapping,
6612                                           GFP_KERNEL);
6613         if (!tp->hw_stats)
6614                 goto err_out;
6615
6616         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6617
6618         for (i = 0; i < tp->irq_cnt; i++) {
6619                 struct tg3_napi *tnapi = &tp->napi[i];
6620                 struct tg3_hw_status *sblk;
6621
6622                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6623                                                       TG3_HW_STATUS_SIZE,
6624                                                       &tnapi->status_mapping,
6625                                                       GFP_KERNEL);
6626                 if (!tnapi->hw_status)
6627                         goto err_out;
6628
6629                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6630                 sblk = tnapi->hw_status;
6631
6632                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6633                         goto err_out;
6634
6635                 /* If multivector TSS is enabled, vector 0 does not handle
6636                  * tx interrupts.  Don't allocate any resources for it.
6637                  */
6638                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6639                     (i && tg3_flag(tp, ENABLE_TSS))) {
6640                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6641                                                     TG3_TX_RING_SIZE,
6642                                                     GFP_KERNEL);
6643                         if (!tnapi->tx_buffers)
6644                                 goto err_out;
6645
6646                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6647                                                             TG3_TX_RING_BYTES,
6648                                                         &tnapi->tx_desc_mapping,
6649                                                             GFP_KERNEL);
6650                         if (!tnapi->tx_ring)
6651                                 goto err_out;
6652                 }
6653
6654                 /*
6655                  * When RSS is enabled, the status block format changes
6656                  * slightly.  The "rx_jumbo_consumer", "reserved",
6657                  * and "rx_mini_consumer" members get mapped to the
6658                  * other three rx return ring producer indexes.
6659                  */
6660                 switch (i) {
6661                 default:
6662                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6663                         break;
6664                 case 2:
6665                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6666                         break;
6667                 case 3:
6668                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6669                         break;
6670                 case 4:
6671                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6672                         break;
6673                 }
6674
6675                 /*
6676                  * If multivector RSS is enabled, vector 0 does not handle
6677                  * rx or tx interrupts.  Don't allocate any resources for it.
6678                  */
6679                 if (!i && tg3_flag(tp, ENABLE_RSS))
6680                         continue;
6681
6682                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6683                                                    TG3_RX_RCB_RING_BYTES(tp),
6684                                                    &tnapi->rx_rcb_mapping,
6685                                                    GFP_KERNEL);
6686                 if (!tnapi->rx_rcb)
6687                         goto err_out;
6688
6689                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6690         }
6691
6692         return 0;
6693
6694 err_out:
6695         tg3_free_consistent(tp);
6696         return -ENOMEM;
6697 }
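
/* Note on the unwind idiom above: every failure path jumps to err_out, and
 * tg3_free_consistent() checks each pointer before freeing, so it is safe
 * to call with whatever subset of the allocations actually succeeded.
 */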
6698
6699 #define MAX_WAIT_CNT 1000
6700
6701 /* To stop a block, clear the enable bit and poll until it
6702  * clears.  tp->lock is held.
6703  */
6704 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6705 {
6706         unsigned int i;
6707         u32 val;
6708
6709         if (tg3_flag(tp, 5705_PLUS)) {
6710                 switch (ofs) {
6711                 case RCVLSC_MODE:
6712                 case DMAC_MODE:
6713                 case MBFREE_MODE:
6714                 case BUFMGR_MODE:
6715                 case MEMARB_MODE:
6716                         /* We can't enable/disable these bits of the
6717                          * 5705/5750; just say success.
6718                          */
6719                         return 0;
6720
6721                 default:
6722                         break;
6723                 }
6724         }
6725
6726         val = tr32(ofs);
6727         val &= ~enable_bit;
6728         tw32_f(ofs, val);
6729
6730         for (i = 0; i < MAX_WAIT_CNT; i++) {
6731                 udelay(100);
6732                 val = tr32(ofs);
6733                 if ((val & enable_bit) == 0)
6734                         break;
6735         }
6736
6737         if (i == MAX_WAIT_CNT && !silent) {
6738                 dev_err(&tp->pdev->dev,
6739                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6740                         ofs, enable_bit);
6741                 return -ENODEV;
6742         }
6743
6744         return 0;
6745 }
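
/* Illustrative arithmetic, not part of the driver: each poll iteration
 * above sleeps 100us, so the total wait budget is MAX_WAIT_CNT * 100us =
 * 1000 * 100us = 100ms before tg3_stop_block() gives up with -ENODEV.
 */
#if 0
        /* e.g. a silent stop attempt bounded by that 100ms ceiling: */
        err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, 1);
#endif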
6746
6747 /* tp->lock is held. */
6748 static int tg3_abort_hw(struct tg3 *tp, int silent)
6749 {
6750         int i, err;
6751
6752         tg3_disable_ints(tp);
6753
6754         tp->rx_mode &= ~RX_MODE_ENABLE;
6755         tw32_f(MAC_RX_MODE, tp->rx_mode);
6756         udelay(10);
6757
6758         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6759         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6760         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6761         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6762         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6763         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6764
6765         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6766         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6767         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6768         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6769         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6770         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6771         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6772
6773         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6774         tw32_f(MAC_MODE, tp->mac_mode);
6775         udelay(40);
6776
6777         tp->tx_mode &= ~TX_MODE_ENABLE;
6778         tw32_f(MAC_TX_MODE, tp->tx_mode);
6779
6780         for (i = 0; i < MAX_WAIT_CNT; i++) {
6781                 udelay(100);
6782                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6783                         break;
6784         }
6785         if (i >= MAX_WAIT_CNT) {
6786                 dev_err(&tp->pdev->dev,
6787                         "%s timed out, TX_MODE_ENABLE will not clear "
6788                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6789                 err |= -ENODEV;
6790         }
6791
6792         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6793         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6794         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6795
6796         tw32(FTQ_RESET, 0xffffffff);
6797         tw32(FTQ_RESET, 0x00000000);
6798
6799         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6800         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6801
6802         for (i = 0; i < tp->irq_cnt; i++) {
6803                 struct tg3_napi *tnapi = &tp->napi[i];
6804                 if (tnapi->hw_status)
6805                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6806         }
6807         if (tp->hw_stats)
6808                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6809
6810         return err;
6811 }
6812
6813 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6814 {
6815         int i;
6816         u32 apedata;
6817
6818         /* NCSI does not support APE events */
6819         if (tg3_flag(tp, APE_HAS_NCSI))
6820                 return;
6821
6822         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6823         if (apedata != APE_SEG_SIG_MAGIC)
6824                 return;
6825
6826         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6827         if (!(apedata & APE_FW_STATUS_READY))
6828                 return;
6829
6830         /* Wait for up to 1 millisecond for APE to service previous event. */
6831         for (i = 0; i < 10; i++) {
6832                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6833                         return;
6834
6835                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6836
6837                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6838                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6839                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6840
6841                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6842
6843                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6844                         break;
6845
6846                 udelay(100);
6847         }
6848
6849         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6850                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6851 }
6852
6853 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6854 {
6855         u32 event;
6856         u32 apedata;
6857
6858         if (!tg3_flag(tp, ENABLE_APE))
6859                 return;
6860
6861         switch (kind) {
6862         case RESET_KIND_INIT:
6863                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6864                                 APE_HOST_SEG_SIG_MAGIC);
6865                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6866                                 APE_HOST_SEG_LEN_MAGIC);
6867                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6868                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6869                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6870                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6871                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6872                                 APE_HOST_BEHAV_NO_PHYLOCK);
6873                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6874                                     TG3_APE_HOST_DRVR_STATE_START);
6875
6876                 event = APE_EVENT_STATUS_STATE_START;
6877                 break;
6878         case RESET_KIND_SHUTDOWN:
6879                 /* With the interface we are currently using,
6880                  * APE does not track driver state.  Wiping
6881                  * out the HOST SEGMENT SIGNATURE forces
6882                  * the APE to assume OS absent status.
6883                  */
6884                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6885
6886                 if (device_may_wakeup(&tp->pdev->dev) &&
6887                     tg3_flag(tp, WOL_ENABLE)) {
6888                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6889                                             TG3_APE_HOST_WOL_SPEED_AUTO);
6890                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6891                 } else
6892                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6893
6894                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6895
6896                 event = APE_EVENT_STATUS_STATE_UNLOAD;
6897                 break;
6898         case RESET_KIND_SUSPEND:
6899                 event = APE_EVENT_STATUS_STATE_SUSPEND;
6900                 break;
6901         default:
6902                 return;
6903         }
6904
6905         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6906
6907         tg3_ape_send_event(tp, event);
6908 }
6909
6910 /* tp->lock is held. */
6911 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6912 {
6913         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6914                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6915
6916         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6917                 switch (kind) {
6918                 case RESET_KIND_INIT:
6919                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6920                                       DRV_STATE_START);
6921                         break;
6922
6923                 case RESET_KIND_SHUTDOWN:
6924                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6925                                       DRV_STATE_UNLOAD);
6926                         break;
6927
6928                 case RESET_KIND_SUSPEND:
6929                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6930                                       DRV_STATE_SUSPEND);
6931                         break;
6932
6933                 default:
6934                         break;
6935                 }
6936         }
6937
6938         if (kind == RESET_KIND_INIT ||
6939             kind == RESET_KIND_SUSPEND)
6940                 tg3_ape_driver_state_change(tp, kind);
6941 }
6942
6943 /* tp->lock is held. */
6944 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6945 {
6946         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6947                 switch (kind) {
6948                 case RESET_KIND_INIT:
6949                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6950                                       DRV_STATE_START_DONE);
6951                         break;
6952
6953                 case RESET_KIND_SHUTDOWN:
6954                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6955                                       DRV_STATE_UNLOAD_DONE);
6956                         break;
6957
6958                 default:
6959                         break;
6960                 }
6961         }
6962
6963         if (kind == RESET_KIND_SHUTDOWN)
6964                 tg3_ape_driver_state_change(tp, kind);
6965 }
6966
6967 /* tp->lock is held. */
6968 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6969 {
6970         if (tg3_flag(tp, ENABLE_ASF)) {
6971                 switch (kind) {
6972                 case RESET_KIND_INIT:
6973                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6974                                       DRV_STATE_START);
6975                         break;
6976
6977                 case RESET_KIND_SHUTDOWN:
6978                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6979                                       DRV_STATE_UNLOAD);
6980                         break;
6981
6982                 case RESET_KIND_SUSPEND:
6983                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6984                                       DRV_STATE_SUSPEND);
6985                         break;
6986
6987                 default:
6988                         break;
6989                 }
6990         }
6991 }
6992
6993 static int tg3_poll_fw(struct tg3 *tp)
6994 {
6995         int i;
6996         u32 val;
6997
6998         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6999                 /* Wait up to 20ms for init done. */
7000                 for (i = 0; i < 200; i++) {
7001                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7002                                 return 0;
7003                         udelay(100);
7004                 }
7005                 return -ENODEV;
7006         }
7007
7008         /* Wait for firmware initialization to complete. */
7009         for (i = 0; i < 100000; i++) {
7010                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7011                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7012                         break;
7013                 udelay(10);
7014         }
7015
7016         /* Chip might not be fitted with firmware.  Some Sun onboard
7017          * parts are configured like that.  So don't signal the timeout
7018          * of the above loop as an error, but do report the lack of
7019          * running firmware once.
7020          */
7021         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7022                 tg3_flag_set(tp, NO_FWARE_REPORTED);
7023
7024                 netdev_info(tp->dev, "No firmware running\n");
7025         }
7026
7027         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7028                 /* The 57765 A0 needs a little more
7029                  * time to do some important work.
7030                  */
7031                 mdelay(10);
7032         }
7033
7034         return 0;
7035 }
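/* Editor's note: the bounded-poll idiom in tg3_poll_fw() (spin on a
 * status word with udelay() between reads, then test the loop counter
 * for timeout) recurs throughout this file.  A minimal sketch of the
 * pattern; tg3_poll_bit() is hypothetical, not a real tg3 helper:
 */
#if 0	/* illustration only, not built */
static int tg3_poll_bit(struct tg3 *tp, u32 reg, u32 bit,
			int tries, int usecs_per_try)
{
	int i;

	for (i = 0; i < tries; i++) {
		if (tr32(reg) & bit)
			return 0;		/* condition met */
		udelay(usecs_per_try);
	}
	return -ENODEV;				/* timed out */
}
#endif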
7036
7037 /* Save PCI command register before chip reset */
7038 static void tg3_save_pci_state(struct tg3 *tp)
7039 {
7040         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7041 }
7042
7043 /* Restore PCI state after chip reset */
7044 static void tg3_restore_pci_state(struct tg3 *tp)
7045 {
7046         u32 val;
7047
7048         /* Re-enable indirect register accesses. */
7049         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7050                                tp->misc_host_ctrl);
7051
7052         /* Set MAX PCI retry to zero. */
7053         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7054         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7055             tg3_flag(tp, PCIX_MODE))
7056                 val |= PCISTATE_RETRY_SAME_DMA;
7057         /* Allow reads and writes to the APE register and memory space. */
7058         if (tg3_flag(tp, ENABLE_APE))
7059                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7060                        PCISTATE_ALLOW_APE_SHMEM_WR |
7061                        PCISTATE_ALLOW_APE_PSPACE_WR;
7062         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7063
7064         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7065
7066         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7067                 if (tg3_flag(tp, PCI_EXPRESS))
7068                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7069                 else {
7070                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7071                                               tp->pci_cacheline_sz);
7072                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7073                                               tp->pci_lat_timer);
7074                 }
7075         }
7076
7077         /* Make sure PCI-X relaxed ordering bit is clear. */
7078         if (tg3_flag(tp, PCIX_MODE)) {
7079                 u16 pcix_cmd;
7080
7081                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7082                                      &pcix_cmd);
7083                 pcix_cmd &= ~PCI_X_CMD_ERO;
7084                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7085                                       pcix_cmd);
7086         }
7087
7088         if (tg3_flag(tp, 5780_CLASS)) {
7089
7090                 /* Chip reset on 5780 will reset MSI enable bit,
7091                  * so need to restore it.
7092                  */
7093                 if (tg3_flag(tp, USING_MSI)) {
7094                         u16 ctrl;
7095
7096                         pci_read_config_word(tp->pdev,
7097                                              tp->msi_cap + PCI_MSI_FLAGS,
7098                                              &ctrl);
7099                         pci_write_config_word(tp->pdev,
7100                                               tp->msi_cap + PCI_MSI_FLAGS,
7101                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7102                         val = tr32(MSGINT_MODE);
7103                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7104                 }
7105         }
7106 }
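/* Editor's note: tg3_save_pci_state()/tg3_restore_pci_state() are meant
 * to bracket tg3_chip_reset() below.  The GRC core-clock reset clobbers
 * the memory-enable bit in PCI_COMMAND and, on 5780-class parts, the
 * MSI enable bit, so only PCI_COMMAND needs saving beforehand; the rest
 * is rebuilt from fields cached at probe time (misc_host_ctrl,
 * pci_cacheline_sz, pci_lat_timer, pcie_readrq, ...).
 */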
7107
7108 static void tg3_stop_fw(struct tg3 *);
7109
7110 /* tp->lock is held. */
7111 static int tg3_chip_reset(struct tg3 *tp)
7112 {
7113         u32 val;
7114         void (*write_op)(struct tg3 *, u32, u32);
7115         int i, err;
7116
7117         tg3_nvram_lock(tp);
7118
7119         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7120
7121         /* No matching tg3_nvram_unlock() after this because
7122          * chip reset below will undo the nvram lock.
7123          */
7124         tp->nvram_lock_cnt = 0;
7125
7126         /* GRC_MISC_CFG core clock reset will clear the memory
7127          * enable bit in PCI register 4 and the MSI enable bit
7128          * on some chips, so we save relevant registers here.
7129          */
7130         tg3_save_pci_state(tp);
7131
7132         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7133             tg3_flag(tp, 5755_PLUS))
7134                 tw32(GRC_FASTBOOT_PC, 0);
7135
7136         /*
7137          * We must avoid the readl() that normally takes place.
7138          * It locks machines, causes machine checks, and triggers
7139          * other fun things.  So, temporarily disable the 5701
7140          * hardware workaround, while we do the reset.
7141          */
7142         write_op = tp->write32;
7143         if (write_op == tg3_write_flush_reg32)
7144                 tp->write32 = tg3_write32;
7145
7146         /* Prevent the irq handler from reading or writing PCI registers
7147          * during chip reset when the memory enable bit in the PCI command
7148          * register may be cleared.  The chip does not generate interrupts
7149          * at this time, but the irq handler may still be called due to irq
7150          * sharing or irqpoll.
7151          */
7152         tg3_flag_set(tp, CHIP_RESETTING);
7153         for (i = 0; i < tp->irq_cnt; i++) {
7154                 struct tg3_napi *tnapi = &tp->napi[i];
7155                 if (tnapi->hw_status) {
7156                         tnapi->hw_status->status = 0;
7157                         tnapi->hw_status->status_tag = 0;
7158                 }
7159                 tnapi->last_tag = 0;
7160                 tnapi->last_irq_tag = 0;
7161         }
7162         smp_mb();
7163
7164         for (i = 0; i < tp->irq_cnt; i++)
7165                 synchronize_irq(tp->napi[i].irq_vec);
7166
7167         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7168                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7169                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7170         }
7171
7172         /* do the reset */
7173         val = GRC_MISC_CFG_CORECLK_RESET;
7174
7175         if (tg3_flag(tp, PCI_EXPRESS)) {
7176                 /* Force PCIe 1.0a mode */
7177                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7178                     !tg3_flag(tp, 57765_PLUS) &&
7179                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7180                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7181                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7182
7183                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7184                         tw32(GRC_MISC_CFG, (1 << 29));
7185                         val |= (1 << 29);
7186                 }
7187         }
7188
7189         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7190                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7191                 tw32(GRC_VCPU_EXT_CTRL,
7192                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7193         }
7194
7195         /* Manage gphy power for all CPMU absent PCIe devices. */
7196         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7197                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7198
7199         tw32(GRC_MISC_CFG, val);
7200
7201         /* restore 5701 hardware bug workaround write method */
7202         tp->write32 = write_op;
7203
7204         /* Unfortunately, we have to delay before the PCI read back.
7205          * Some 575X chips will not even respond to a PCI cfg access
7206          * when the reset command is given to the chip.
7207          *
7208          * How do these hardware designers expect things to work
7209          * properly if the PCI write is posted for a long period
7210          * of time?  It is always necessary to have some method by
7211          * which a register read back can occur to push out the
7212          * posted write that triggers the reset.
7213          *
7214          * For most tg3 variants the trick below has worked.
7215          * Ho hum...
7216          */
7217         udelay(120);
7218
7219         /* Flush PCI posted writes.  The normal MMIO registers
7220          * are inaccessible at this time so this is the only
7221          * way to do this reliably (actually, this is no longer
7222          * the case, see above).  I tried to use indirect
7223          * register read/write but this upset some 5701 variants.
7224          */
7225         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7226
7227         udelay(120);
7228
7229         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7230                 u16 val16;
7231
7232                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7233                         int i;
7234                         u32 cfg_val;
7235
7236                         /* Wait for link training to complete.  */
7237                         for (i = 0; i < 5000; i++)
7238                                 udelay(100);
7239
7240                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7241                         pci_write_config_dword(tp->pdev, 0xc4,
7242                                                cfg_val | (1 << 15));
7243                 }
7244
7245                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7246                 pci_read_config_word(tp->pdev,
7247                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7248                                      &val16);
7249                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7250                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7251                 /*
7252                  * Older PCIe devices only support the 128 byte
7253                  * MPS setting.  Enforce the restriction.
7254                  */
7255                 if (!tg3_flag(tp, CPMU_PRESENT))
7256                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7257                 pci_write_config_word(tp->pdev,
7258                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7259                                       val16);
7260
7261                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7262
7263                 /* Clear error status */
7264                 pci_write_config_word(tp->pdev,
7265                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7266                                       PCI_EXP_DEVSTA_CED |
7267                                       PCI_EXP_DEVSTA_NFED |
7268                                       PCI_EXP_DEVSTA_FED |
7269                                       PCI_EXP_DEVSTA_URD);
7270         }
7271
7272         tg3_restore_pci_state(tp);
7273
7274         tg3_flag_clear(tp, CHIP_RESETTING);
7275         tg3_flag_clear(tp, ERROR_PROCESSED);
7276
7277         val = 0;
7278         if (tg3_flag(tp, 5780_CLASS))
7279                 val = tr32(MEMARB_MODE);
7280         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7281
7282         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7283                 tg3_stop_fw(tp);
7284                 tw32(0x5000, 0x400);
7285         }
7286
7287         tw32(GRC_MODE, tp->grc_mode);
7288
7289         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7290                 val = tr32(0xc4);
7291
7292                 tw32(0xc4, val | (1 << 15));
7293         }
7294
7295         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7296             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7297                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7298                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7299                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7300                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7301         }
7302
7303         if (tg3_flag(tp, ENABLE_APE))
7304                 tp->mac_mode = MAC_MODE_APE_TX_EN |
7305                                MAC_MODE_APE_RX_EN |
7306                                MAC_MODE_TDE_ENABLE;
7307
7308         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7309                 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7310                 val = tp->mac_mode;
7311         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7312                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7313                 val = tp->mac_mode;
7314         } else
7315                 val = 0;
7316
7317         tw32_f(MAC_MODE, val);
7318         udelay(40);
7319
7320         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7321
7322         err = tg3_poll_fw(tp);
7323         if (err)
7324                 return err;
7325
7326         tg3_mdio_start(tp);
7327
7328         if (tg3_flag(tp, PCI_EXPRESS) &&
7329             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7330             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7331             !tg3_flag(tp, 57765_PLUS)) {
7332                 val = tr32(0x7c00);
7333
7334                 tw32(0x7c00, val | (1 << 25));
7335         }
7336
7337         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7338                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7339                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7340         }
7341
7342         /* Reprobe ASF enable state.  */
7343         tg3_flag_clear(tp, ENABLE_ASF);
7344         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7345         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7346         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7347                 u32 nic_cfg;
7348
7349                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7350                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7351                         tg3_flag_set(tp, ENABLE_ASF);
7352                         tp->last_event_jiffies = jiffies;
7353                         if (tg3_flag(tp, 5750_PLUS))
7354                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7355                 }
7356         }
7357
7358         return 0;
7359 }
7360
7361 /* tp->lock is held. */
7362 static void tg3_stop_fw(struct tg3 *tp)
7363 {
7364         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7365                 /* Wait for RX cpu to ACK the previous event. */
7366                 tg3_wait_for_event_ack(tp);
7367
7368                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7369
7370                 tg3_generate_fw_event(tp);
7371
7372                 /* Wait for RX cpu to ACK this event. */
7373                 tg3_wait_for_event_ack(tp);
7374         }
7375 }
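/* Editor's note: the driver->firmware handshake in tg3_stop_fw() is:
 *   1. wait for the RX cpu to ack any event still in flight,
 *   2. write FWCMD_NICDRV_PAUSE_FW into the firmware command mailbox,
 *   3. ring the firmware event doorbell (tg3_generate_fw_event), and
 *   4. wait for the RX cpu to ack this new event.
 */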
7376
7377 /* tp->lock is held. */
7378 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7379 {
7380         int err;
7381
7382         tg3_stop_fw(tp);
7383
7384         tg3_write_sig_pre_reset(tp, kind);
7385
7386         tg3_abort_hw(tp, silent);
7387         err = tg3_chip_reset(tp);
7388
7389         __tg3_set_mac_addr(tp, 0);
7390
7391         tg3_write_sig_legacy(tp, kind);
7392         tg3_write_sig_post_reset(tp, kind);
7393
7394         if (err)
7395                 return err;
7396
7397         return 0;
7398 }
7399
7400 #define RX_CPU_SCRATCH_BASE     0x30000
7401 #define RX_CPU_SCRATCH_SIZE     0x04000
7402 #define TX_CPU_SCRATCH_BASE     0x34000
7403 #define TX_CPU_SCRATCH_SIZE     0x04000
7404
7405 /* tp->lock is held. */
7406 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7407 {
7408         int i;
7409
7410         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7411
7412         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7413                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7414
7415                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7416                 return 0;
7417         }
7418         if (offset == RX_CPU_BASE) {
7419                 for (i = 0; i < 10000; i++) {
7420                         tw32(offset + CPU_STATE, 0xffffffff);
7421                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7422                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7423                                 break;
7424                 }
7425
7426                 tw32(offset + CPU_STATE, 0xffffffff);
7427                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7428                 udelay(10);
7429         } else {
7430                 for (i = 0; i < 10000; i++) {
7431                         tw32(offset + CPU_STATE, 0xffffffff);
7432                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7433                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7434                                 break;
7435                 }
7436         }
7437
7438         if (i >= 10000) {
7439                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7440                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7441                 return -ENODEV;
7442         }
7443
7444         /* Clear firmware's nvram arbitration. */
7445         if (tg3_flag(tp, NVRAM))
7446                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7447         return 0;
7448 }
7449
7450 struct fw_info {
7451         unsigned int fw_base;
7452         unsigned int fw_len;
7453         const __be32 *fw_data;
7454 };
7455
7456 /* tp->lock is held. */
7457 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7458                                  int cpu_scratch_size, struct fw_info *info)
7459 {
7460         int err, lock_err, i;
7461         void (*write_op)(struct tg3 *, u32, u32);
7462
7463         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7464                 netdev_err(tp->dev,
7465                            "%s: cannot load TX cpu firmware, 5705-class chips have no TX cpu\n",
7466                            __func__);
7467                 return -EINVAL;
7468         }
7469
7470         if (tg3_flag(tp, 5705_PLUS))
7471                 write_op = tg3_write_mem;
7472         else
7473                 write_op = tg3_write_indirect_reg32;
7474
7475         /* It is possible that bootcode is still loading at this point.
7476          * Get the nvram lock first before halting the cpu.
7477          */
7478         lock_err = tg3_nvram_lock(tp);
7479         err = tg3_halt_cpu(tp, cpu_base);
7480         if (!lock_err)
7481                 tg3_nvram_unlock(tp);
7482         if (err)
7483                 goto out;
7484
7485         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7486                 write_op(tp, cpu_scratch_base + i, 0);
7487         tw32(cpu_base + CPU_STATE, 0xffffffff);
7488         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
7489         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7490                 write_op(tp, (cpu_scratch_base +
7491                               (info->fw_base & 0xffff) +
7492                               (i * sizeof(u32))),
7493                               be32_to_cpu(info->fw_data[i]));
7494
7495         err = 0;
7496
7497 out:
7498         return err;
7499 }
7500
7501 /* tp->lock is held. */
7502 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7503 {
7504         struct fw_info info;
7505         const __be32 *fw_data;
7506         int err, i;
7507
7508         fw_data = (void *)tp->fw->data;
7509
7510         /* Firmware blob starts with version numbers, followed by
7511          * start address and length.  We are setting the complete length:
7512          * length = end_address_of_bss - start_address_of_text.  The
7513          * remainder is the blob to be loaded contiguously
7514          * from the start address. */
7515
7516         info.fw_base = be32_to_cpu(fw_data[1]);
7517         info.fw_len = tp->fw->size - 12;
7518         info.fw_data = &fw_data[3];
7519
7520         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7521                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7522                                     &info);
7523         if (err)
7524                 return err;
7525
7526         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7527                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7528                                     &info);
7529         if (err)
7530                 return err;
7531
7532         /* Now startup only the RX cpu. */
7533         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7534         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7535
7536         for (i = 0; i < 5; i++) {
7537                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7538                         break;
7539                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7540                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7541                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7542                 udelay(1000);
7543         }
7544         if (i >= 5) {
7545                 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x, "
7546                            "should be %08x\n", __func__,
7547                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7548                 return -ENODEV;
7549         }
7550         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7551         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7552
7553         return 0;
7554 }
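/* Editor's note: both firmware loaders here assume the same
 * request_firmware() blob layout (32-bit big-endian words):
 *
 *	word 0:	 firmware version
 *	word 1:	 load (start) address		-> info.fw_base
 *	word 2:	 nominal length			-> ignored, recomputed
 *	word 3+: image, loaded contiguously at the start address
 *
 * hence fw_len = tp->fw->size - 12, skipping the three header words.
 * A hypothetical parse helper, for illustration only:
 */
#if 0	/* illustration only, not built */
static void tg3_parse_fw_hdr(const struct firmware *fw, struct fw_info *info)
{
	const __be32 *fw_data = (const __be32 *)fw->data;

	info->fw_base = be32_to_cpu(fw_data[1]);	/* start address */
	info->fw_len  = fw->size - 12;			/* full image length */
	info->fw_data = &fw_data[3];			/* image payload */
}
#endif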
7555
7556 /* tp->lock is held. */
7557 static int tg3_load_tso_firmware(struct tg3 *tp)
7558 {
7559         struct fw_info info;
7560         const __be32 *fw_data;
7561         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7562         int err, i;
7563
7564         if (tg3_flag(tp, HW_TSO_1) ||
7565             tg3_flag(tp, HW_TSO_2) ||
7566             tg3_flag(tp, HW_TSO_3))
7567                 return 0;
7568
7569         fw_data = (void *)tp->fw->data;
7570
7571         /* Firmware blob starts with version numbers, followed by
7572          * start address and length.  We are setting the complete length:
7573          * length = end_address_of_bss - start_address_of_text.  The
7574          * remainder is the blob to be loaded contiguously
7575          * from the start address. */
7576
7577         info.fw_base = be32_to_cpu(fw_data[1]);
7578         cpu_scratch_size = tp->fw_len;
7579         info.fw_len = tp->fw->size - 12;
7580         info.fw_data = &fw_data[3];
7581
7582         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7583                 cpu_base = RX_CPU_BASE;
7584                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7585         } else {
7586                 cpu_base = TX_CPU_BASE;
7587                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7588                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7589         }
7590
7591         err = tg3_load_firmware_cpu(tp, cpu_base,
7592                                     cpu_scratch_base, cpu_scratch_size,
7593                                     &info);
7594         if (err)
7595                 return err;
7596
7597         /* Now startup the cpu. */
7598         tw32(cpu_base + CPU_STATE, 0xffffffff);
7599         tw32_f(cpu_base + CPU_PC, info.fw_base);
7600
7601         for (i = 0; i < 5; i++) {
7602                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7603                         break;
7604                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7605                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7606                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7607                 udelay(1000);
7608         }
7609         if (i >= 5) {
7610                 netdev_err(tp->dev,
7611                            "%s failed to set CPU PC, is %08x, should be %08x\n",
7612                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7613                 return -ENODEV;
7614         }
7615         tw32(cpu_base + CPU_STATE, 0xffffffff);
7616         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7617         return 0;
7618 }
7619
7620
7621 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7622 {
7623         struct tg3 *tp = netdev_priv(dev);
7624         struct sockaddr *addr = p;
7625         int err = 0, skip_mac_1 = 0;
7626
7627         if (!is_valid_ether_addr(addr->sa_data))
7628                 return -EINVAL;
7629
7630         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7631
7632         if (!netif_running(dev))
7633                 return 0;
7634
7635         if (tg3_flag(tp, ENABLE_ASF)) {
7636                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7637
7638                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7639                 addr0_low = tr32(MAC_ADDR_0_LOW);
7640                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7641                 addr1_low = tr32(MAC_ADDR_1_LOW);
7642
7643                 /* Skip MAC addr 1 if ASF is using it. */
7644                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7645                     !(addr1_high == 0 && addr1_low == 0))
7646                         skip_mac_1 = 1;
7647         }
7648         spin_lock_bh(&tp->lock);
7649         __tg3_set_mac_addr(tp, skip_mac_1);
7650         spin_unlock_bh(&tp->lock);
7651
7652         return err;
7653 }
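/* Editor's note: tg3_set_mac_addr() is wired up as the driver's
 * .ndo_set_mac_address hook, so it runs for e.g.
 * "ip link set dev ethX address ..."; the generic layer hands it a
 * struct sockaddr whose sa_data carries the new station address.
 */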
7654
7655 /* tp->lock is held. */
7656 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7657                            dma_addr_t mapping, u32 maxlen_flags,
7658                            u32 nic_addr)
7659 {
7660         tg3_write_mem(tp,
7661                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7662                       ((u64) mapping >> 32));
7663         tg3_write_mem(tp,
7664                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7665                       ((u64) mapping & 0xffffffff));
7666         tg3_write_mem(tp,
7667                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7668                        maxlen_flags);
7669
7670         if (!tg3_flag(tp, 5705_PLUS))
7671                 tg3_write_mem(tp,
7672                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7673                               nic_addr);
7674 }
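/* Editor's note: a typical tg3_set_bdinfo() use, pointing a ring
 * control block at a host ring:
 *
 *	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, tnapi->tx_desc_mapping,
 *		       TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 *		       NIC_SRAM_TX_BUFFER_DESC);
 *
 * which is exactly how tg3_rings_reset() below programs the first
 * send ring.
 */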
7675
7676 static void __tg3_set_rx_mode(struct net_device *);
7677 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7678 {
7679         int i;
7680
7681         if (!tg3_flag(tp, ENABLE_TSS)) {
7682                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7683                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7684                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7685         } else {
7686                 tw32(HOSTCC_TXCOL_TICKS, 0);
7687                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7688                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7689         }
7690
7691         if (!tg3_flag(tp, ENABLE_RSS)) {
7692                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7693                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7694                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7695         } else {
7696                 tw32(HOSTCC_RXCOL_TICKS, 0);
7697                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7698                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7699         }
7700
7701         if (!tg3_flag(tp, 5705_PLUS)) {
7702                 u32 val = ec->stats_block_coalesce_usecs;
7703
7704                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7705                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7706
7707                 if (!netif_carrier_ok(tp->dev))
7708                         val = 0;
7709
7710                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7711         }
7712
7713         for (i = 0; i < tp->irq_cnt - 1; i++) {
7714                 u32 reg;
7715
7716                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7717                 tw32(reg, ec->rx_coalesce_usecs);
7718                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7719                 tw32(reg, ec->rx_max_coalesced_frames);
7720                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7721                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7722
7723                 if (tg3_flag(tp, ENABLE_TSS)) {
7724                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7725                         tw32(reg, ec->tx_coalesce_usecs);
7726                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7727                         tw32(reg, ec->tx_max_coalesced_frames);
7728                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7729                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7730                 }
7731         }
7732
7733         for (; i < tp->irq_max - 1; i++) {
7734                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7735                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7736                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7737
7738                 if (tg3_flag(tp, ENABLE_TSS)) {
7739                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7740                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7741                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7742                 }
7743         }
7744 }
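/* Editor's note: the ethtool_coalesce fields consumed above map
 * directly onto HOSTCC registers, one register block of stride 0x18
 * per extra MSI-X vector.  A hypothetical configuration (values are
 * arbitrary), to make the mapping concrete:
 *
 *	struct ethtool_coalesce ec = {
 *		.rx_coalesce_usecs	 = 20,	-- HOSTCC_RXCOL_TICKS
 *		.rx_max_coalesced_frames = 5,	-- HOSTCC_RXMAX_FRAMES
 *		.tx_coalesce_usecs	 = 72,	-- HOSTCC_TXCOL_TICKS
 *		.tx_max_coalesced_frames = 53,	-- HOSTCC_TXMAX_FRAMES
 *	};
 *	__tg3_set_coalesce(tp, &ec);
 */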
7745
7746 /* tp->lock is held. */
7747 static void tg3_rings_reset(struct tg3 *tp)
7748 {
7749         int i;
7750         u32 stblk, txrcb, rxrcb, limit;
7751         struct tg3_napi *tnapi = &tp->napi[0];
7752
7753         /* Disable all transmit rings but the first. */
7754         if (!tg3_flag(tp, 5705_PLUS))
7755                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7756         else if (tg3_flag(tp, 5717_PLUS))
7757                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7758         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7759                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7760         else
7761                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7762
7763         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7764              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7765                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7766                               BDINFO_FLAGS_DISABLED);
7767
7768
7769         /* Disable all receive return rings but the first. */
7770         if (tg3_flag(tp, 5717_PLUS))
7771                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7772         else if (!tg3_flag(tp, 5705_PLUS))
7773                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7774         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7775                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7776                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7777         else
7778                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7779
7780         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7781              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7782                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7783                               BDINFO_FLAGS_DISABLED);
7784
7785         /* Disable interrupts */
7786         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7787         tp->napi[0].chk_msi_cnt = 0;
7788         tp->napi[0].last_rx_cons = 0;
7789         tp->napi[0].last_tx_cons = 0;
7790
7791         /* Zero mailbox registers. */
7792         if (tg3_flag(tp, SUPPORT_MSIX)) {
7793                 for (i = 1; i < tp->irq_max; i++) {
7794                         tp->napi[i].tx_prod = 0;
7795                         tp->napi[i].tx_cons = 0;
7796                         if (tg3_flag(tp, ENABLE_TSS))
7797                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7798                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7799                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7800                         tp->napi[i].chk_msi_cnt = 0;
7801                         tp->napi[i].last_rx_cons = 0;
7802                         tp->napi[i].last_tx_cons = 0;
7803                 }
7804                 if (!tg3_flag(tp, ENABLE_TSS))
7805                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7806         } else {
7807                 tp->napi[0].tx_prod = 0;
7808                 tp->napi[0].tx_cons = 0;
7809                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7810                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7811         }
7812
7813         /* Make sure the NIC-based send BD rings are disabled. */
7814         if (!tg3_flag(tp, 5705_PLUS)) {
7815                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7816                 for (i = 0; i < 16; i++)
7817                         tw32_tx_mbox(mbox + i * 8, 0);
7818         }
7819
7820         txrcb = NIC_SRAM_SEND_RCB;
7821         rxrcb = NIC_SRAM_RCV_RET_RCB;
7822
7823         /* Clear status block in ram. */
7824         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7825
7826         /* Set status block DMA address */
7827         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7828              ((u64) tnapi->status_mapping >> 32));
7829         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7830              ((u64) tnapi->status_mapping & 0xffffffff));
7831
7832         if (tnapi->tx_ring) {
7833                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7834                                (TG3_TX_RING_SIZE <<
7835                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7836                                NIC_SRAM_TX_BUFFER_DESC);
7837                 txrcb += TG3_BDINFO_SIZE;
7838         }
7839
7840         if (tnapi->rx_rcb) {
7841                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7842                                (tp->rx_ret_ring_mask + 1) <<
7843                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7844                 rxrcb += TG3_BDINFO_SIZE;
7845         }
7846
7847         stblk = HOSTCC_STATBLCK_RING1;
7848
7849         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7850                 u64 mapping = (u64)tnapi->status_mapping;
7851                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7852                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7853
7854                 /* Clear status block in ram. */
7855                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7856
7857                 if (tnapi->tx_ring) {
7858                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7859                                        (TG3_TX_RING_SIZE <<
7860                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7861                                        NIC_SRAM_TX_BUFFER_DESC);
7862                         txrcb += TG3_BDINFO_SIZE;
7863                 }
7864
7865                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7866                                ((tp->rx_ret_ring_mask + 1) <<
7867                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7868
7869                 stblk += 8;
7870                 rxrcb += TG3_BDINFO_SIZE;
7871         }
7872 }
7873
7874 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7875 {
7876         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7877
7878         if (!tg3_flag(tp, 5750_PLUS) ||
7879             tg3_flag(tp, 5780_CLASS) ||
7880             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7881             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7882                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7883         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7884                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7885                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7886         else
7887                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7888
7889         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7890         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7891
7892         val = min(nic_rep_thresh, host_rep_thresh);
7893         tw32(RCVBDI_STD_THRESH, val);
7894
7895         if (tg3_flag(tp, 57765_PLUS))
7896                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7897
7898         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7899                 return;
7900
7901         if (!tg3_flag(tp, 5705_PLUS))
7902                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7903         else
7904                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7905
7906         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7907
7908         val = min(bdcache_maxcnt / 2, host_rep_thresh);
7909         tw32(RCVBDI_JUMBO_THRESH, val);
7910
7911         if (tg3_flag(tp, 57765_PLUS))
7912                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7913 }
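/* Editor's note: worked example of the threshold math above, assuming
 * the default tp->rx_pending of 200 (TG3_DEF_RX_RING_PENDING):
 *
 *	host_rep_thresh   = max(200 / 8, 1) = 25
 *	nic_rep_thresh    = min(bdcache_maxcnt / 2, tp->rx_std_max_post)
 *	RCVBDI_STD_THRESH = min(nic_rep_thresh, host_rep_thresh)
 *
 * i.e. the chip asks for replenishment after the smaller of "half the
 * on-chip BD cache" and "1/8 of the configured host ring".
 */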
7914
7915 /* tp->lock is held. */
7916 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7917 {
7918         u32 val, rdmac_mode;
7919         int i, err, limit;
7920         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7921
7922         tg3_disable_ints(tp);
7923
7924         tg3_stop_fw(tp);
7925
7926         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7927
7928         if (tg3_flag(tp, INIT_COMPLETE))
7929                 tg3_abort_hw(tp, 1);
7930
7931         /* Enable MAC control of LPI */
7932         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7933                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7934                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7935                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
7936
7937                 tw32_f(TG3_CPMU_EEE_CTRL,
7938                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7939
7940                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7941                       TG3_CPMU_EEEMD_LPI_IN_TX |
7942                       TG3_CPMU_EEEMD_LPI_IN_RX |
7943                       TG3_CPMU_EEEMD_EEE_ENABLE;
7944
7945                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7946                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7947
7948                 if (tg3_flag(tp, ENABLE_APE))
7949                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7950
7951                 tw32_f(TG3_CPMU_EEE_MODE, val);
7952
7953                 tw32_f(TG3_CPMU_EEE_DBTMR1,
7954                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7955                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7956
7957                 tw32_f(TG3_CPMU_EEE_DBTMR2,
7958                        TG3_CPMU_DBTMR2_APE_TX_2047US |
7959                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7960         }
7961
7962         if (reset_phy)
7963                 tg3_phy_reset(tp);
7964
7965         err = tg3_chip_reset(tp);
7966         if (err)
7967                 return err;
7968
7969         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7970
7971         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7972                 val = tr32(TG3_CPMU_CTRL);
7973                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7974                 tw32(TG3_CPMU_CTRL, val);
7975
7976                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7977                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7978                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7979                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7980
7981                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7982                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7983                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7984                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7985
7986                 val = tr32(TG3_CPMU_HST_ACC);
7987                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7988                 val |= CPMU_HST_ACC_MACCLK_6_25;
7989                 tw32(TG3_CPMU_HST_ACC, val);
7990         }
7991
7992         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7993                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7994                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7995                        PCIE_PWR_MGMT_L1_THRESH_4MS;
7996                 tw32(PCIE_PWR_MGMT_THRESH, val);
7997
7998                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7999                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8000
8001                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8002
8003                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8004                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8005         }
8006
8007         if (tg3_flag(tp, L1PLLPD_EN)) {
8008                 u32 grc_mode = tr32(GRC_MODE);
8009
8010                 /* Access the lower 1K of PL PCIE block registers. */
8011                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8012                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8013
8014                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8015                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8016                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8017
8018                 tw32(GRC_MODE, grc_mode);
8019         }
8020
8021         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8022                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8023                         u32 grc_mode = tr32(GRC_MODE);
8024
8025                         /* Access the lower 1K of PL PCIE block registers. */
8026                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8027                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8028
8029                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8030                                    TG3_PCIE_PL_LO_PHYCTL5);
8031                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8032                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8033
8034                         tw32(GRC_MODE, grc_mode);
8035                 }
8036
8037                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8038                         u32 grc_mode = tr32(GRC_MODE);
8039
8040                         /* Access the lower 1K of DL PCIE block registers. */
8041                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8042                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8043
8044                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8045                                    TG3_PCIE_DL_LO_FTSMAX);
8046                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8047                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8048                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8049
8050                         tw32(GRC_MODE, grc_mode);
8051                 }
8052
8053                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8054                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8055                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8056                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8057         }
8058
8059         /* This works around an issue with Athlon chipsets on
8060          * B3 tigon3 silicon.  This bit has no effect on any
8061          * other revision.  But do not set this on PCI Express
8062          * chips and don't even touch the clocks if the CPMU is present.
8063          */
8064         if (!tg3_flag(tp, CPMU_PRESENT)) {
8065                 if (!tg3_flag(tp, PCI_EXPRESS))
8066                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8067                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8068         }
8069
8070         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8071             tg3_flag(tp, PCIX_MODE)) {
8072                 val = tr32(TG3PCI_PCISTATE);
8073                 val |= PCISTATE_RETRY_SAME_DMA;
8074                 tw32(TG3PCI_PCISTATE, val);
8075         }
8076
8077         if (tg3_flag(tp, ENABLE_APE)) {
8078                 /* Allow reads and writes to the
8079                  * APE register and memory space.
8080                  */
8081                 val = tr32(TG3PCI_PCISTATE);
8082                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8083                        PCISTATE_ALLOW_APE_SHMEM_WR |
8084                        PCISTATE_ALLOW_APE_PSPACE_WR;
8085                 tw32(TG3PCI_PCISTATE, val);
8086         }
8087
8088         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8089                 /* Enable some hw fixes.  */
8090                 val = tr32(TG3PCI_MSI_DATA);
8091                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8092                 tw32(TG3PCI_MSI_DATA, val);
8093         }
8094
8095         /* Descriptor ring init may make accesses to the
8096          * NIC SRAM area to setup the TX descriptors, so we
8097          * can only do this after the hardware has been
8098          * successfully reset.
8099          */
8100         err = tg3_init_rings(tp);
8101         if (err)
8102                 return err;
8103
8104         if (tg3_flag(tp, 57765_PLUS)) {
8105                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8106                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8107                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8108                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8109                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8110                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8111                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8112                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8113         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8114                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8115                 /* This value is determined during the probe time DMA
8116                  * engine test, tg3_test_dma.
8117                  */
8118                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8119         }
8120
8121         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8122                           GRC_MODE_4X_NIC_SEND_RINGS |
8123                           GRC_MODE_NO_TX_PHDR_CSUM |
8124                           GRC_MODE_NO_RX_PHDR_CSUM);
8125         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8126
8127         /* Pseudo-header checksum is done by hardware logic and not
8128          * the offload processors, so make the chip do the pseudo-
8129          * header checksums on receive.  For transmit it is more
8130          * convenient to do the pseudo-header checksum in software
8131          * as Linux does that on transmit for us in all cases.
8132          */
8133         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8134
8135         tw32(GRC_MODE,
8136              tp->grc_mode |
8137              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8138
8139         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8140         val = tr32(GRC_MISC_CFG);
8141         val &= ~0xff;
8142         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8143         tw32(GRC_MISC_CFG, val);
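        /* Editor's note: presumably a divide-by-(N+1) prescaler: with
         * the fixed 66 MHz core clock, 65 + 1 = 66 yields a 1 MHz
         * (1 usec) timer tick, matching the usec-denominated
         * coalescing tick values programmed elsewhere.
         */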
8144
8145         /* Initialize MBUF/DESC pool. */
8146         if (tg3_flag(tp, 5750_PLUS)) {
8147                 /* Do nothing.  */
8148         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8149                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8150                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8151                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8152                 else
8153                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8154                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8155                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8156         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8157                 int fw_len;
8158
8159                 fw_len = tp->fw_len;
8160                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8161                 tw32(BUFMGR_MB_POOL_ADDR,
8162                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8163                 tw32(BUFMGR_MB_POOL_SIZE,
8164                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8165         }
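        /* Editor's note: the alignment above rounds fw_len up to a
         * 128-byte boundary, e.g. 0x1234 -> (0x1234 + 0x7f) & ~0x7f
         * = 0x1280, so the mbuf pool starts 128-byte aligned just
         * past the resident firmware image.
         */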
8166
8167         if (tp->dev->mtu <= ETH_DATA_LEN) {
8168                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8169                      tp->bufmgr_config.mbuf_read_dma_low_water);
8170                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8171                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8172                 tw32(BUFMGR_MB_HIGH_WATER,
8173                      tp->bufmgr_config.mbuf_high_water);
8174         } else {
8175                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8176                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8177                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8178                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8179                 tw32(BUFMGR_MB_HIGH_WATER,
8180                      tp->bufmgr_config.mbuf_high_water_jumbo);
8181         }
8182         tw32(BUFMGR_DMA_LOW_WATER,
8183              tp->bufmgr_config.dma_low_water);
8184         tw32(BUFMGR_DMA_HIGH_WATER,
8185              tp->bufmgr_config.dma_high_water);
8186
8187         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8188         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8189                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8190         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8191             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8192             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8193                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8194         tw32(BUFMGR_MODE, val);
8195         for (i = 0; i < 2000; i++) {
8196                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8197                         break;
8198                 udelay(10);
8199         }
8200         if (i >= 2000) {
8201                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8202                 return -ENODEV;
8203         }
8204
8205         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8206                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8207
8208         tg3_setup_rxbd_thresholds(tp);
8209
8210         /* Initialize TG3_BDINFO's at:
8211          *  RCVDBDI_STD_BD:     standard eth size rx ring
8212          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8213          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8214          *
8215          * like so:
8216          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8217          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8218          *                              ring attribute flags
8219          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8220          *
8221          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8222          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8223          *
8224          * The size of each ring is fixed in the firmware, but the location is
8225          * configurable.
8226          */
8227         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8228              ((u64) tpr->rx_std_mapping >> 32));
8229         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8230              ((u64) tpr->rx_std_mapping & 0xffffffff));
8231         if (!tg3_flag(tp, 5717_PLUS))
8232                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8233                      NIC_SRAM_RX_BUFFER_DESC);
8234
8235         /* Disable the mini ring */
8236         if (!tg3_flag(tp, 5705_PLUS))
8237                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8238                      BDINFO_FLAGS_DISABLED);
8239
8240         /* Program the jumbo buffer descriptor ring control
8241          * blocks on those devices that have them.
8242          */
8243         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8244             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8245
8246                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8247                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8248                              ((u64) tpr->rx_jmb_mapping >> 32));
8249                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8250                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8251                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8252                               BDINFO_FLAGS_MAXLEN_SHIFT;
8253                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8254                              val | BDINFO_FLAGS_USE_EXT_RECV);
8255                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8256                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8257                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8258                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8259                 } else {
8260                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8261                              BDINFO_FLAGS_DISABLED);
8262                 }
8263
8264                 if (tg3_flag(tp, 57765_PLUS)) {
8265                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8266                                 val = TG3_RX_STD_MAX_SIZE_5700;
8267                         else
8268                                 val = TG3_RX_STD_MAX_SIZE_5717;
8269                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8270                         val |= (TG3_RX_STD_DMA_SZ << 2);
8271                 } else
8272                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8273         } else
8274                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8275
8276         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8277
8278         tpr->rx_std_prod_idx = tp->rx_pending;
8279         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8280
8281         tpr->rx_jmb_prod_idx =
8282                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8283         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8284
8285         tg3_rings_reset(tp);
8286
8287         /* Initialize MAC address and backoff seed. */
8288         __tg3_set_mac_addr(tp, 0);
8289
8290         /* MTU + ethernet header + FCS + optional VLAN tag */
8291         tw32(MAC_RX_MTU_SIZE,
8292              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8293
8294         /* The slot time is changed by tg3_setup_phy if we
8295          * run at gigabit with half duplex.
8296          */
8297         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8298               (6 << TX_LENGTHS_IPG_SHIFT) |
8299               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8300
8301         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8302                 val |= tr32(MAC_TX_LENGTHS) &
8303                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8304                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8305
8306         tw32(MAC_TX_LENGTHS, val);
8307
8308         /* Receive rules. */
8309         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8310         tw32(RCVLPC_CONFIG, 0x0181);
8311
8312         /* Calculate RDMAC_MODE setting early, we need it to determine
8313          * the RCVLPC_STATE_ENABLE mask.
8314          */
8315         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8316                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8317                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8318                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8319                       RDMAC_MODE_LNGREAD_ENAB);
8320
8321         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8322                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8323
8324         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8325             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8326             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8327                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8328                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8329                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8330
8331         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8332             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8333                 if (tg3_flag(tp, TSO_CAPABLE) &&
8334                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8335                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8336                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8337                            !tg3_flag(tp, IS_5788)) {
8338                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8339                 }
8340         }
8341
8342         if (tg3_flag(tp, PCI_EXPRESS))
8343                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8344
8345         if (tg3_flag(tp, HW_TSO_1) ||
8346             tg3_flag(tp, HW_TSO_2) ||
8347             tg3_flag(tp, HW_TSO_3))
8348                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8349
8350         if (tg3_flag(tp, 57765_PLUS) ||
8351             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8352             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8353                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8354
8355         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8356                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8357
8358         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8359             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8360             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8361             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8362             tg3_flag(tp, 57765_PLUS)) {
8363                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8364                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8365                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8366                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8367                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8368                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8369                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8370                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8371                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8372                 }
8373                 tw32(TG3_RDMA_RSRVCTRL_REG,
8374                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8375         }
8376
8377         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8378             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8379                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8380                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8381                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8382                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8383         }
8384
8385         /* Receive/send statistics. */
8386         if (tg3_flag(tp, 5750_PLUS)) {
8387                 val = tr32(RCVLPC_STATS_ENABLE);
8388                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8389                 tw32(RCVLPC_STATS_ENABLE, val);
8390         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8391                    tg3_flag(tp, TSO_CAPABLE)) {
8392                 val = tr32(RCVLPC_STATS_ENABLE);
8393                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8394                 tw32(RCVLPC_STATS_ENABLE, val);
8395         } else {
8396                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8397         }
8398         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8399         tw32(SNDDATAI_STATSENAB, 0xffffff);
8400         tw32(SNDDATAI_STATSCTRL,
8401              (SNDDATAI_SCTRL_ENABLE |
8402               SNDDATAI_SCTRL_FASTUPD));
8403
8404         /* Setup host coalescing engine. */
8405         tw32(HOSTCC_MODE, 0);
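        /* Poll up to 2000 * 10 usec (20 ms) for the enable bit to read
         * back clear, so the engine is quiescent before the coalescing
         * parameters are rewritten below.
         */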
8406         for (i = 0; i < 2000; i++) {
8407                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8408                         break;
8409                 udelay(10);
8410         }
8411
8412         __tg3_set_coalesce(tp, &tp->coal);
8413
8414         if (!tg3_flag(tp, 5705_PLUS)) {
8415                 /* Status/statistics block address.  See tg3_timer,
8416                  * the tg3_periodic_fetch_stats call there, and
8417                  * tg3_get_stats to see how this works for 5705/5750 chips.
8418                  */
8419                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8420                      ((u64) tp->stats_mapping >> 32));
8421                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8422                      ((u64) tp->stats_mapping & 0xffffffff));
8423                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8424
8425                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8426
8427                 /* Clear statistics and status block memory areas */
8428                 for (i = NIC_SRAM_STATS_BLK;
8429                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8430                      i += sizeof(u32)) {
8431                         tg3_write_mem(tp, i, 0);
8432                         udelay(40);
8433                 }
8434         }
8435
8436         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8437
8438         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8439         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8440         if (!tg3_flag(tp, 5705_PLUS))
8441                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8442
8443         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8444                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8445                 /* reset to prevent losing 1st rx packet intermittently */
8446                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8447                 udelay(10);
8448         }
8449
8450         if (tg3_flag(tp, ENABLE_APE))
8451                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8452         else
8453                 tp->mac_mode = 0;
8454         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8455                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8456         if (!tg3_flag(tp, 5705_PLUS) &&
8457             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8458             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8459                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8460         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8461         udelay(40);
8462
8463         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8464          * If TG3_FLAG_IS_NIC is zero, we should read the
8465          * register to preserve the GPIO settings for LOMs. The GPIOs,
8466          * whether used as inputs or outputs, are set by boot code after
8467          * reset.
8468          */
8469         if (!tg3_flag(tp, IS_NIC)) {
8470                 u32 gpio_mask;
8471
8472                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8473                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8474                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8475
8476                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8477                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8478                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8479
8480                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8481                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8482
8483                 tp->grc_local_ctrl &= ~gpio_mask;
8484                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8485
8486                 /* GPIO1 must be driven high for eeprom write protect */
8487                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8488                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8489                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8490         }
8491         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8492         udelay(100);
8493
8494         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8495                 val = tr32(MSGINT_MODE);
8496                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8497                 tw32(MSGINT_MODE, val);
8498         }
8499
8500         if (!tg3_flag(tp, 5705_PLUS)) {
8501                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8502                 udelay(40);
8503         }
8504
8505         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8506                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8507                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8508                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8509                WDMAC_MODE_LNGREAD_ENAB);
8510
8511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8512             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8513                 if (tg3_flag(tp, TSO_CAPABLE) &&
8514                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8515                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8516                         /* nothing */
8517                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8518                            !tg3_flag(tp, IS_5788)) {
8519                         val |= WDMAC_MODE_RX_ACCEL;
8520                 }
8521         }
8522
8523         /* Enable host coalescing bug fix */
8524         if (tg3_flag(tp, 5755_PLUS))
8525                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8526
8527         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8528                 val |= WDMAC_MODE_BURST_ALL_DATA;
8529
8530         tw32_f(WDMAC_MODE, val);
8531         udelay(40);
8532
8533         if (tg3_flag(tp, PCIX_MODE)) {
8534                 u16 pcix_cmd;
8535
8536                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8537                                      &pcix_cmd);
8538                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8539                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8540                         pcix_cmd |= PCI_X_CMD_READ_2K;
8541                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8542                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8543                         pcix_cmd |= PCI_X_CMD_READ_2K;
8544                 }
8545                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8546                                       pcix_cmd);
8547         }
8548
8549         tw32_f(RDMAC_MODE, rdmac_mode);
8550         udelay(40);
8551
8552         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8553         if (!tg3_flag(tp, 5705_PLUS))
8554                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8555
8556         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8557                 tw32(SNDDATAC_MODE,
8558                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8559         else
8560                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8561
8562         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8563         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8564         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8565         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8566                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8567         tw32(RCVDBDI_MODE, val);
8568         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8569         if (tg3_flag(tp, HW_TSO_1) ||
8570             tg3_flag(tp, HW_TSO_2) ||
8571             tg3_flag(tp, HW_TSO_3))
8572                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8573         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8574         if (tg3_flag(tp, ENABLE_TSS))
8575                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8576         tw32(SNDBDI_MODE, val);
8577         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8578
8579         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8580                 err = tg3_load_5701_a0_firmware_fix(tp);
8581                 if (err)
8582                         return err;
8583         }
8584
8585         if (tg3_flag(tp, TSO_CAPABLE)) {
8586                 err = tg3_load_tso_firmware(tp);
8587                 if (err)
8588                         return err;
8589         }
8590
8591         tp->tx_mode = TX_MODE_ENABLE;
8592
8593         if (tg3_flag(tp, 5755_PLUS) ||
8594             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8595                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8596
8597         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8598                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8599                 tp->tx_mode &= ~val;
8600                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8601         }
8602
8603         tw32_f(MAC_TX_MODE, tp->tx_mode);
8604         udelay(100);
8605
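        /* The RSS indirection table is TG3_RSS_INDIR_TBL_SIZE one-byte
         * entries packed four per 32-bit register: each byte is assigned
         * round-robin across the irq_cnt - 1 rx rings (the first vector
         * handles link and error events only), and the assembled word is
         * flushed to the chip after every fourth entry.
         */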
8606         if (tg3_flag(tp, ENABLE_RSS)) {
8607                 u32 reg = MAC_RSS_INDIR_TBL_0;
8608                 u8 *ent = (u8 *)&val;
8609
8610                 /* Setup the indirection table */
8611                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8612                         int idx = i % sizeof(val);
8613
8614                         ent[idx] = i % (tp->irq_cnt - 1);
8615                         if (idx == sizeof(val) - 1) {
8616                                 tw32(reg, val);
8617                                 reg += 4;
8618                         }
8619                 }
8620
8621                 /* Setup the "secret" hash key. */
8622                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8623                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8624                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8625                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8626                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8627                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8628                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8629                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8630                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8631                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8632         }
8633
8634         tp->rx_mode = RX_MODE_ENABLE;
8635         if (tg3_flag(tp, 5755_PLUS))
8636                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8637
8638         if (tg3_flag(tp, ENABLE_RSS))
8639                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8640                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8641                                RX_MODE_RSS_IPV6_HASH_EN |
8642                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8643                                RX_MODE_RSS_IPV4_HASH_EN |
8644                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8645
8646         tw32_f(MAC_RX_MODE, tp->rx_mode);
8647         udelay(10);
8648
8649         tw32(MAC_LED_CTRL, tp->led_ctrl);
8650
8651         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8652         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8653                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8654                 udelay(10);
8655         }
8656         tw32_f(MAC_RX_MODE, tp->rx_mode);
8657         udelay(10);
8658
8659         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8660                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8661                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8662                         /* Set drive transmission level to 1.2V  */
8663                         /* only if the signal pre-emphasis bit is not set  */
8664                         val = tr32(MAC_SERDES_CFG);
8665                         val &= 0xfffff000;
8666                         val |= 0x880;
8667                         tw32(MAC_SERDES_CFG, val);
8668                 }
8669                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8670                         tw32(MAC_SERDES_CFG, 0x616000);
8671         }
8672
8673         /* Prevent chip from dropping frames when flow control
8674          * is enabled.
8675          */
8676         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8677                 val = 1;
8678         else
8679                 val = 2;
8680         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8681
8682         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8683             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8684                 /* Use hardware link auto-negotiation */
8685                 tg3_flag_set(tp, HW_AUTONEG);
8686         }
8687
8688         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8689             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8690                 u32 tmp;
8691
8692                 tmp = tr32(SERDES_RX_CTRL);
8693                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8694                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8695                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8696                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8697         }
8698
8699         if (!tg3_flag(tp, USE_PHYLIB)) {
8700                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8701                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8702                         tp->link_config.speed = tp->link_config.orig_speed;
8703                         tp->link_config.duplex = tp->link_config.orig_duplex;
8704                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8705                 }
8706
8707                 err = tg3_setup_phy(tp, 0);
8708                 if (err)
8709                         return err;
8710
8711                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8712                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8713                         u32 tmp;
8714
8715                         /* Clear CRC stats. */
8716                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8717                                 tg3_writephy(tp, MII_TG3_TEST1,
8718                                              tmp | MII_TG3_TEST1_CRC_EN);
8719                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8720                         }
8721                 }
8722         }
8723
8724         __tg3_set_rx_mode(tp->dev);
8725
8726         /* Initialize receive rules. */
8727         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8728         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8729         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8730         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8731
8732         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8733                 limit = 8;
8734         else
8735                 limit = 16;
8736         if (tg3_flag(tp, ENABLE_ASF))
8737                 limit -= 4;
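        /* Deliberate fall-through: starting at "case limit", every rule
         * from limit - 1 down to 4 is disabled.  Rules 0 and 1 were
         * programmed above, rules 2 and 3 are reserved, and when ASF is
         * enabled the top four rules are left untouched, presumably for
         * the firmware's own use.
         */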
8738         switch (limit) {
8739         case 16:
8740                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8741         case 15:
8742                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8743         case 14:
8744                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8745         case 13:
8746                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8747         case 12:
8748                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8749         case 11:
8750                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8751         case 10:
8752                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8753         case 9:
8754                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8755         case 8:
8756                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8757         case 7:
8758                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8759         case 6:
8760                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8761         case 5:
8762                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8763         case 4:
8764                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8765         case 3:
8766                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8767         case 2:
8768         case 1:
8769
8770         default:
8771                 break;
8772         }
8773
8774         if (tg3_flag(tp, ENABLE_APE))
8775                 /* Write our heartbeat update interval to APE. */
8776                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8777                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8778
8779         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8780
8781         return 0;
8782 }
8783
8784 /* Called at device open time to get the chip ready for
8785  * packet processing.  Invoked with tp->lock held.
8786  */
8787 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8788 {
8789         tg3_switch_clocks(tp);
8790
8791         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8792
8793         return tg3_reset_hw(tp, reset_phy);
8794 }
8795
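/* Accumulate a 32-bit hardware statistics register into a 64-bit
 * { high, low } software counter.  If adding the sample wraps the low
 * word (the unsigned sum ends up smaller than the addend), a carry is
 * propagated into the high word.
 */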
8796 #define TG3_STAT_ADD32(PSTAT, REG) \
8797 do {    u32 __val = tr32(REG); \
8798         (PSTAT)->low += __val; \
8799         if ((PSTAT)->low < __val) \
8800                 (PSTAT)->high += 1; \
8801 } while (0)
8802
8803 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8804 {
8805         struct tg3_hw_stats *sp = tp->hw_stats;
8806
8807         if (!netif_carrier_ok(tp->dev))
8808                 return;
8809
8810         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8811         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8812         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8813         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8814         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8815         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8816         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8817         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8818         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8819         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8820         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8821         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8822         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8823
8824         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8825         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8826         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8827         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8828         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8829         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8830         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8831         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8832         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8833         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8834         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8835         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8836         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8837         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8838
8839         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8840         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8841             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8842             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8843                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8844         } else {
8845                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8846                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8847                 if (val) {
8848                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8849                         sp->rx_discards.low += val;
8850                         if (sp->rx_discards.low < val)
8851                                 sp->rx_discards.high += 1;
8852                 }
8853                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8854         }
8855         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8856 }
8857
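/* Work around occasionally missed MSIs: if a napi context has work
 * pending but neither its rx nor tx consumer index has advanced since
 * the previous timer tick, assume the interrupt was lost and rewrite
 * the interrupt mailbox to re-arm it.  One quiet tick is tolerated
 * (chk_msi_cnt) before the mailbox is poked.
 */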
8858 static void tg3_chk_missed_msi(struct tg3 *tp)
8859 {
8860         u32 i;
8861
8862         for (i = 0; i < tp->irq_cnt; i++) {
8863                 struct tg3_napi *tnapi = &tp->napi[i];
8864
8865                 if (tg3_has_work(tnapi)) {
8866                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8867                             tnapi->last_tx_cons == tnapi->tx_cons) {
8868                                 if (tnapi->chk_msi_cnt < 1) {
8869                                         tnapi->chk_msi_cnt++;
8870                                         return;
8871                                 }
8872                                 tw32_mailbox(tnapi->int_mbox,
8873                                              tnapi->last_tag << 24);
8874                         }
8875                 }
8876                 tnapi->chk_msi_cnt = 0;
8877                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8878                 tnapi->last_tx_cons = tnapi->tx_cons;
8879         }
8880 }
8881
8882 static void tg3_timer(unsigned long __opaque)
8883 {
8884         struct tg3 *tp = (struct tg3 *) __opaque;
8885
8886         if (tp->irq_sync)
8887                 goto restart_timer;
8888
8889         spin_lock(&tp->lock);
8890
8891         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8892             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8893                 tg3_chk_missed_msi(tp);
8894
8895         if (!tg3_flag(tp, TAGGED_STATUS)) {
8896                 /* All of this garbage is because, when using non-tagged
8897                  * IRQ status, the mailbox/status_block protocol the chip
8898                  * uses with the cpu is race prone.
8899                  */
8900                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8901                         tw32(GRC_LOCAL_CTRL,
8902                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8903                 } else {
8904                         tw32(HOSTCC_MODE, tp->coalesce_mode |
8905                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8906                 }
8907
8908                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8909                         tg3_flag_set(tp, RESTART_TIMER);
8910                         spin_unlock(&tp->lock);
8911                         schedule_work(&tp->reset_task);
8912                         return;
8913                 }
8914         }
8915
8916         /* This part only runs once per second. */
8917         if (!--tp->timer_counter) {
8918                 if (tg3_flag(tp, 5705_PLUS))
8919                         tg3_periodic_fetch_stats(tp);
8920
8921                 if (tp->setlpicnt && !--tp->setlpicnt)
8922                         tg3_phy_eee_enable(tp);
8923
8924                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8925                         u32 mac_stat;
8926                         int phy_event;
8927
8928                         mac_stat = tr32(MAC_STATUS);
8929
8930                         phy_event = 0;
8931                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8932                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8933                                         phy_event = 1;
8934                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8935                                 phy_event = 1;
8936
8937                         if (phy_event)
8938                                 tg3_setup_phy(tp, 0);
8939                 } else if (tg3_flag(tp, POLL_SERDES)) {
8940                         u32 mac_stat = tr32(MAC_STATUS);
8941                         int need_setup = 0;
8942
8943                         if (netif_carrier_ok(tp->dev) &&
8944                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8945                                 need_setup = 1;
8946                         }
8947                         if (!netif_carrier_ok(tp->dev) &&
8948                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
8949                                          MAC_STATUS_SIGNAL_DET))) {
8950                                 need_setup = 1;
8951                         }
8952                         if (need_setup) {
8953                                 if (!tp->serdes_counter) {
8954                                         tw32_f(MAC_MODE,
8955                                              (tp->mac_mode &
8956                                               ~MAC_MODE_PORT_MODE_MASK));
8957                                         udelay(40);
8958                                         tw32_f(MAC_MODE, tp->mac_mode);
8959                                         udelay(40);
8960                                 }
8961                                 tg3_setup_phy(tp, 0);
8962                         }
8963                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8964                            tg3_flag(tp, 5780_CLASS)) {
8965                         tg3_serdes_parallel_detect(tp);
8966                 }
8967
8968                 tp->timer_counter = tp->timer_multiplier;
8969         }
8970
8971         /* Heartbeat is only sent once every 2 seconds.
8972          *
8973          * The heartbeat is to tell the ASF firmware that the host
8974          * driver is still alive.  In the event that the OS crashes,
8975          * ASF needs to reset the hardware to free up the FIFO space
8976          * that may be filled with rx packets destined for the host.
8977          * If the FIFO is full, ASF will no longer function properly.
8978          *
8979          * Unintended resets have been reported on real time kernels
8980          * where the timer doesn't run on time.  Netpoll will also have
8981          * the same problem.
8982          *
8983          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8984          * to check the ring condition when the heartbeat is expiring
8985          * before doing the reset.  This will prevent most unintended
8986          * resets.
8987          */
8988         if (!--tp->asf_counter) {
8989                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8990                         tg3_wait_for_event_ack(tp);
8991
8992                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8993                                       FWCMD_NICDRV_ALIVE3);
8994                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8995                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8996                                       TG3_FW_UPDATE_TIMEOUT_SEC);
8997
8998                         tg3_generate_fw_event(tp);
8999                 }
9000                 tp->asf_counter = tp->asf_multiplier;
9001         }
9002
9003         spin_unlock(&tp->lock);
9004
9005 restart_timer:
9006         tp->timer.expires = jiffies + tp->timer_offset;
9007         add_timer(&tp->timer);
9008 }
9009
9010 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9011 {
9012         irq_handler_t fn;
9013         unsigned long flags;
9014         char *name;
9015         struct tg3_napi *tnapi = &tp->napi[irq_num];
9016
9017         if (tp->irq_cnt == 1)
9018                 name = tp->dev->name;
9019         else {
9020                 name = &tnapi->irq_lbl[0];
9021                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9022                 name[IFNAMSIZ-1] = 0;
9023         }
9024
9025         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9026                 fn = tg3_msi;
9027                 if (tg3_flag(tp, 1SHOT_MSI))
9028                         fn = tg3_msi_1shot;
9029                 flags = 0;
9030         } else {
9031                 fn = tg3_interrupt;
9032                 if (tg3_flag(tp, TAGGED_STATUS))
9033                         fn = tg3_interrupt_tagged;
9034                 flags = IRQF_SHARED;
9035         }
9036
9037         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9038 }
9039
9040 static int tg3_test_interrupt(struct tg3 *tp)
9041 {
9042         struct tg3_napi *tnapi = &tp->napi[0];
9043         struct net_device *dev = tp->dev;
9044         int err, i, intr_ok = 0;
9045         u32 val;
9046
9047         if (!netif_running(dev))
9048                 return -ENODEV;
9049
9050         tg3_disable_ints(tp);
9051
9052         free_irq(tnapi->irq_vec, tnapi);
9053
9054         /*
9055          * Turn off MSI one shot mode.  Otherwise this test has no
9056          * observable way to know whether the interrupt was delivered.
9057          */
9058         if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9059                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9060                 tw32(MSGINT_MODE, val);
9061         }
9062
9063         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9064                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9065         if (err)
9066                 return err;
9067
9068         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9069         tg3_enable_ints(tp);
9070
9071         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9072                tnapi->coal_now);
9073
9074         for (i = 0; i < 5; i++) {
9075                 u32 int_mbox, misc_host_ctrl;
9076
9077                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9078                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9079
9080                 if ((int_mbox != 0) ||
9081                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9082                         intr_ok = 1;
9083                         break;
9084                 }
9085
9086                 msleep(10);
9087         }
9088
9089         tg3_disable_ints(tp);
9090
9091         free_irq(tnapi->irq_vec, tnapi);
9092
9093         err = tg3_request_irq(tp, 0);
9094
9095         if (err)
9096                 return err;
9097
9098         if (intr_ok) {
9099                 /* Reenable MSI one shot mode. */
9100                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9101                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9102                         tw32(MSGINT_MODE, val);
9103                 }
9104                 return 0;
9105         }
9106
9107         return -EIO;
9108 }
9109
9110 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
9111  * is successfully restored.
9112  */
9113 static int tg3_test_msi(struct tg3 *tp)
9114 {
9115         int err;
9116         u16 pci_cmd;
9117
9118         if (!tg3_flag(tp, USING_MSI))
9119                 return 0;
9120
9121         /* Turn off SERR reporting in case MSI terminates with Master
9122          * Abort.
9123          */
9124         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9125         pci_write_config_word(tp->pdev, PCI_COMMAND,
9126                               pci_cmd & ~PCI_COMMAND_SERR);
9127
9128         err = tg3_test_interrupt(tp);
9129
9130         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9131
9132         if (!err)
9133                 return 0;
9134
9135         /* other failures */
9136         if (err != -EIO)
9137                 return err;
9138
9139         /* MSI test failed, go back to INTx mode */
9140         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9141                     "to INTx mode. Please report this failure to the PCI "
9142                     "maintainer and include system chipset information\n");
9143
9144         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9145
9146         pci_disable_msi(tp->pdev);
9147
9148         tg3_flag_clear(tp, USING_MSI);
9149         tp->napi[0].irq_vec = tp->pdev->irq;
9150
9151         err = tg3_request_irq(tp, 0);
9152         if (err)
9153                 return err;
9154
9155         /* Need to reset the chip because the MSI cycle may have terminated
9156          * with Master Abort.
9157          */
9158         tg3_full_lock(tp, 1);
9159
9160         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9161         err = tg3_init_hw(tp, 1);
9162
9163         tg3_full_unlock(tp);
9164
9165         if (err)
9166                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9167
9168         return err;
9169 }
9170
9171 static int tg3_request_firmware(struct tg3 *tp)
9172 {
9173         const __be32 *fw_data;
9174
9175         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9176                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9177                            tp->fw_needed);
9178                 return -ENOENT;
9179         }
9180
9181         fw_data = (void *)tp->fw->data;
9182
9183         /* Firmware blob starts with version numbers, followed by
9184          * start address and _full_ length including BSS sections
9185          * (which must be longer than the actual data, of course).
9186          */
9187
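        /* An illustrative sketch (not an authoritative definition) of the
         * 12-byte header parsed here, inferred from the word indices this
         * function and the firmware loaders use:
         *
         *      struct tg3_fw_hdr {
         *              __be32 version;         fw_data[0]
         *              __be32 base_addr;       fw_data[1], load address
         *              __be32 len;             fw_data[2], full length incl. BSS
         *      };
         */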
9188         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9189         if (tp->fw_len < (tp->fw->size - 12)) {
9190                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9191                            tp->fw_len, tp->fw_needed);
9192                 release_firmware(tp->fw);
9193                 tp->fw = NULL;
9194                 return -EINVAL;
9195         }
9196
9197         /* We no longer need firmware; we have it. */
9198         tp->fw_needed = NULL;
9199         return 0;
9200 }
9201
9202 static bool tg3_enable_msix(struct tg3 *tp)
9203 {
9204         int i, rc, cpus = num_online_cpus();
9205         struct msix_entry msix_ent[tp->irq_max];
9206
9207         if (cpus == 1)
9208                 /* Just fall back to the simpler MSI mode. */
9209                 return false;
9210
9211         /*
9212          * We want as many rx rings enabled as there are cpus.
9213          * The first MSIX vector only deals with link interrupts, etc,
9214          * so we add one to the number of vectors we are requesting.
9215          */
9216         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9217
9218         for (i = 0; i < tp->irq_max; i++) {
9219                 msix_ent[i].entry  = i;
9220                 msix_ent[i].vector = 0;
9221         }
9222
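        /* With this (old) pci_enable_msix() API, a negative return is a
         * hard error, zero is success, and a positive value is the number
         * of vectors that could have been allocated -- so retry once with
         * that smaller count before giving up.
         */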
9223         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9224         if (rc < 0) {
9225                 return false;
9226         } else if (rc != 0) {
9227                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9228                         return false;
9229                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9230                               tp->irq_cnt, rc);
9231                 tp->irq_cnt = rc;
9232         }
9233
9234         for (i = 0; i < tp->irq_max; i++)
9235                 tp->napi[i].irq_vec = msix_ent[i].vector;
9236
9237         netif_set_real_num_tx_queues(tp->dev, 1);
9238         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9239         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9240                 pci_disable_msix(tp->pdev);
9241                 return false;
9242         }
9243
9244         if (tp->irq_cnt > 1) {
9245                 tg3_flag_set(tp, ENABLE_RSS);
9246
9247                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9248                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9249                         tg3_flag_set(tp, ENABLE_TSS);
9250                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9251                 }
9252         }
9253
9254         return true;
9255 }
9256
9257 static void tg3_ints_init(struct tg3 *tp)
9258 {
9259         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9260             !tg3_flag(tp, TAGGED_STATUS)) {
9261                 /* All MSI supporting chips should support tagged
9262                  * status.  Assert that this is the case.
9263                  */
9264                 netdev_warn(tp->dev,
9265                             "MSI without TAGGED_STATUS? Not using MSI\n");
9266                 goto defcfg;
9267         }
9268
9269         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9270                 tg3_flag_set(tp, USING_MSIX);
9271         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9272                 tg3_flag_set(tp, USING_MSI);
9273
9274         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9275                 u32 msi_mode = tr32(MSGINT_MODE);
9276                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9277                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9278                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9279         }
9280 defcfg:
9281         if (!tg3_flag(tp, USING_MSIX)) {
9282                 tp->irq_cnt = 1;
9283                 tp->napi[0].irq_vec = tp->pdev->irq;
9284                 netif_set_real_num_tx_queues(tp->dev, 1);
9285                 netif_set_real_num_rx_queues(tp->dev, 1);
9286         }
9287 }
9288
9289 static void tg3_ints_fini(struct tg3 *tp)
9290 {
9291         if (tg3_flag(tp, USING_MSIX))
9292                 pci_disable_msix(tp->pdev);
9293         else if (tg3_flag(tp, USING_MSI))
9294                 pci_disable_msi(tp->pdev);
9295         tg3_flag_clear(tp, USING_MSI);
9296         tg3_flag_clear(tp, USING_MSIX);
9297         tg3_flag_clear(tp, ENABLE_RSS);
9298         tg3_flag_clear(tp, ENABLE_TSS);
9299 }
9300
9301 static int tg3_open(struct net_device *dev)
9302 {
9303         struct tg3 *tp = netdev_priv(dev);
9304         int i, err;
9305
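        /* The 5701 A0 cannot run without its firmware fix, so a failed
         * load is fatal there.  The TSO firmware is optional: on failure
         * the TSO capability is simply disabled.
         */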
9306         if (tp->fw_needed) {
9307                 err = tg3_request_firmware(tp);
9308                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9309                         if (err)
9310                                 return err;
9311                 } else if (err) {
9312                         netdev_warn(tp->dev, "TSO capability disabled\n");
9313                         tg3_flag_clear(tp, TSO_CAPABLE);
9314                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9315                         netdev_notice(tp->dev, "TSO capability restored\n");
9316                         tg3_flag_set(tp, TSO_CAPABLE);
9317                 }
9318         }
9319
9320         netif_carrier_off(tp->dev);
9321
9322         err = tg3_power_up(tp);
9323         if (err)
9324                 return err;
9325
9326         tg3_full_lock(tp, 0);
9327
9328         tg3_disable_ints(tp);
9329         tg3_flag_clear(tp, INIT_COMPLETE);
9330
9331         tg3_full_unlock(tp);
9332
9333         /*
9334          * Setup interrupts first so we know how
9335          * many NAPI resources to allocate
9336          */
9337         tg3_ints_init(tp);
9338
9339         /* The placement of this call is tied
9340          * to the setup and use of Host TX descriptors.
9341          */
9342         err = tg3_alloc_consistent(tp);
9343         if (err)
9344                 goto err_out1;
9345
9346         tg3_napi_init(tp);
9347
9348         tg3_napi_enable(tp);
9349
9350         for (i = 0; i < tp->irq_cnt; i++) {
9351                 struct tg3_napi *tnapi = &tp->napi[i];
9352                 err = tg3_request_irq(tp, i);
9353                 if (err) {
9354                         for (i--; i >= 0; i--)
9355                                 free_irq(tnapi->irq_vec, tnapi);
9356                         break;
9357                 }
9358         }
9359
9360         if (err)
9361                 goto err_out2;
9362
9363         tg3_full_lock(tp, 0);
9364
9365         err = tg3_init_hw(tp, 1);
9366         if (err) {
9367                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9368                 tg3_free_rings(tp);
9369         } else {
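                /* With tagged status (and no missed-MSI check needed) a
                 * 1 Hz service timer suffices; otherwise run at 10 Hz to
                 * police the race-prone non-tagged mailbox protocol.
                 */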
9370                 if (tg3_flag(tp, TAGGED_STATUS) &&
9371                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9372                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9373                         tp->timer_offset = HZ;
9374                 else
9375                         tp->timer_offset = HZ / 10;
9376
9377                 BUG_ON(tp->timer_offset > HZ);
9378                 tp->timer_counter = tp->timer_multiplier =
9379                         (HZ / tp->timer_offset);
9380                 tp->asf_counter = tp->asf_multiplier =
9381                         ((HZ / tp->timer_offset) * 2);
9382
9383                 init_timer(&tp->timer);
9384                 tp->timer.expires = jiffies + tp->timer_offset;
9385                 tp->timer.data = (unsigned long) tp;
9386                 tp->timer.function = tg3_timer;
9387         }
9388
9389         tg3_full_unlock(tp);
9390
9391         if (err)
9392                 goto err_out3;
9393
9394         if (tg3_flag(tp, USING_MSI)) {
9395                 err = tg3_test_msi(tp);
9396
9397                 if (err) {
9398                         tg3_full_lock(tp, 0);
9399                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9400                         tg3_free_rings(tp);
9401                         tg3_full_unlock(tp);
9402
9403                         goto err_out2;
9404                 }
9405
9406                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9407                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9408
9409                         tw32(PCIE_TRANSACTION_CFG,
9410                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9411                 }
9412         }
9413
9414         tg3_phy_start(tp);
9415
9416         tg3_full_lock(tp, 0);
9417
9418         add_timer(&tp->timer);
9419         tg3_flag_set(tp, INIT_COMPLETE);
9420         tg3_enable_ints(tp);
9421
9422         tg3_full_unlock(tp);
9423
9424         netif_tx_start_all_queues(dev);
9425
9426         /*
9427          * Reset the loopback feature if it was turned on while the device
9428          * was down, and make sure that it is configured properly now.
9429          */
9430         if (dev->features & NETIF_F_LOOPBACK)
9431                 tg3_set_loopback(dev, dev->features);
9432
9433         return 0;
9434
9435 err_out3:
9436         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9437                 struct tg3_napi *tnapi = &tp->napi[i];
9438                 free_irq(tnapi->irq_vec, tnapi);
9439         }
9440
9441 err_out2:
9442         tg3_napi_disable(tp);
9443         tg3_napi_fini(tp);
9444         tg3_free_consistent(tp);
9445
9446 err_out1:
9447         tg3_ints_fini(tp);
9448         return err;
9449 }
9450
9451 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9452                                                  struct rtnl_link_stats64 *);
9453 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9454
9455 static int tg3_close(struct net_device *dev)
9456 {
9457         int i;
9458         struct tg3 *tp = netdev_priv(dev);
9459
9460         tg3_napi_disable(tp);
9461         cancel_work_sync(&tp->reset_task);
9462
9463         netif_tx_stop_all_queues(dev);
9464
9465         del_timer_sync(&tp->timer);
9466
9467         tg3_phy_stop(tp);
9468
9469         tg3_full_lock(tp, 1);
9470
9471         tg3_disable_ints(tp);
9472
9473         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9474         tg3_free_rings(tp);
9475         tg3_flag_clear(tp, INIT_COMPLETE);
9476
9477         tg3_full_unlock(tp);
9478
9479         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9480                 struct tg3_napi *tnapi = &tp->napi[i];
9481                 free_irq(tnapi->irq_vec, tnapi);
9482         }
9483
9484         tg3_ints_fini(tp);
9485
9486         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9487
9488         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9489                sizeof(tp->estats_prev));
9490
9491         tg3_napi_fini(tp);
9492
9493         tg3_free_consistent(tp);
9494
9495         tg3_power_down(tp);
9496
9497         netif_carrier_off(tp->dev);
9498
9499         return 0;
9500 }
9501
9502 static inline u64 get_stat64(tg3_stat64_t *val)
9503 {
9504        return ((u64)val->high << 32) | ((u64)val->low);
9505 }
9506
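/* On 5700/5701 with a copper PHY, FCS errors are counted by the PHY:
 * MII_TG3_TEST1_CRC_EN enables the counter, which is then read via
 * MII_TG3_RXR_COUNTERS (apparently clear-on-read, since the delta is
 * accumulated into phy_crc_errors).  Everything else uses the MAC's
 * rx_fcs_errors hardware statistic.
 */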
9507 static u64 calc_crc_errors(struct tg3 *tp)
9508 {
9509         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9510
9511         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9512             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9513              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9514                 u32 val;
9515
9516                 spin_lock_bh(&tp->lock);
9517                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9518                         tg3_writephy(tp, MII_TG3_TEST1,
9519                                      val | MII_TG3_TEST1_CRC_EN);
9520                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9521                 } else
9522                         val = 0;
9523                 spin_unlock_bh(&tp->lock);
9524
9525                 tp->phy_crc_errors += val;
9526
9527                 return tp->phy_crc_errors;
9528         }
9529
9530         return get_stat64(&hw_stats->rx_fcs_errors);
9531 }
9532
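/* Each ethtool statistic reported is the value saved across the last
 * chip reset or close (old_estats) plus the live 64-bit hardware
 * counter, so the counts survive resets and close/open cycles.
 */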
9533 #define ESTAT_ADD(member) \
9534         estats->member =        old_estats->member + \
9535                                 get_stat64(&hw_stats->member)
9536
9537 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9538 {
9539         struct tg3_ethtool_stats *estats = &tp->estats;
9540         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9541         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9542
9543         if (!hw_stats)
9544                 return old_estats;
9545
9546         ESTAT_ADD(rx_octets);
9547         ESTAT_ADD(rx_fragments);
9548         ESTAT_ADD(rx_ucast_packets);
9549         ESTAT_ADD(rx_mcast_packets);
9550         ESTAT_ADD(rx_bcast_packets);
9551         ESTAT_ADD(rx_fcs_errors);
9552         ESTAT_ADD(rx_align_errors);
9553         ESTAT_ADD(rx_xon_pause_rcvd);
9554         ESTAT_ADD(rx_xoff_pause_rcvd);
9555         ESTAT_ADD(rx_mac_ctrl_rcvd);
9556         ESTAT_ADD(rx_xoff_entered);
9557         ESTAT_ADD(rx_frame_too_long_errors);
9558         ESTAT_ADD(rx_jabbers);
9559         ESTAT_ADD(rx_undersize_packets);
9560         ESTAT_ADD(rx_in_length_errors);
9561         ESTAT_ADD(rx_out_length_errors);
9562         ESTAT_ADD(rx_64_or_less_octet_packets);
9563         ESTAT_ADD(rx_65_to_127_octet_packets);
9564         ESTAT_ADD(rx_128_to_255_octet_packets);
9565         ESTAT_ADD(rx_256_to_511_octet_packets);
9566         ESTAT_ADD(rx_512_to_1023_octet_packets);
9567         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9568         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9569         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9570         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9571         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9572
9573         ESTAT_ADD(tx_octets);
9574         ESTAT_ADD(tx_collisions);
9575         ESTAT_ADD(tx_xon_sent);
9576         ESTAT_ADD(tx_xoff_sent);
9577         ESTAT_ADD(tx_flow_control);
9578         ESTAT_ADD(tx_mac_errors);
9579         ESTAT_ADD(tx_single_collisions);
9580         ESTAT_ADD(tx_mult_collisions);
9581         ESTAT_ADD(tx_deferred);
9582         ESTAT_ADD(tx_excessive_collisions);
9583         ESTAT_ADD(tx_late_collisions);
9584         ESTAT_ADD(tx_collide_2times);
9585         ESTAT_ADD(tx_collide_3times);
9586         ESTAT_ADD(tx_collide_4times);
9587         ESTAT_ADD(tx_collide_5times);
9588         ESTAT_ADD(tx_collide_6times);
9589         ESTAT_ADD(tx_collide_7times);
9590         ESTAT_ADD(tx_collide_8times);
9591         ESTAT_ADD(tx_collide_9times);
9592         ESTAT_ADD(tx_collide_10times);
9593         ESTAT_ADD(tx_collide_11times);
9594         ESTAT_ADD(tx_collide_12times);
9595         ESTAT_ADD(tx_collide_13times);
9596         ESTAT_ADD(tx_collide_14times);
9597         ESTAT_ADD(tx_collide_15times);
9598         ESTAT_ADD(tx_ucast_packets);
9599         ESTAT_ADD(tx_mcast_packets);
9600         ESTAT_ADD(tx_bcast_packets);
9601         ESTAT_ADD(tx_carrier_sense_errors);
9602         ESTAT_ADD(tx_discards);
9603         ESTAT_ADD(tx_errors);
9604
9605         ESTAT_ADD(dma_writeq_full);
9606         ESTAT_ADD(dma_write_prioq_full);
9607         ESTAT_ADD(rxbds_empty);
9608         ESTAT_ADD(rx_discards);
9609         ESTAT_ADD(rx_errors);
9610         ESTAT_ADD(rx_threshold_hit);
9611
9612         ESTAT_ADD(dma_readq_full);
9613         ESTAT_ADD(dma_read_prioq_full);
9614         ESTAT_ADD(tx_comp_queue_full);
9615
9616         ESTAT_ADD(ring_set_send_prod_index);
9617         ESTAT_ADD(ring_status_update);
9618         ESTAT_ADD(nic_irqs);
9619         ESTAT_ADD(nic_avoided_irqs);
9620         ESTAT_ADD(nic_tx_threshold_hit);
9621
9622         ESTAT_ADD(mbuf_lwm_thresh_hit);
9623
9624         return estats;
9625 }
9626
9627 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9628                                                  struct rtnl_link_stats64 *stats)
9629 {
9630         struct tg3 *tp = netdev_priv(dev);
9631         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9632         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9633
9634         if (!hw_stats)
9635                 return old_stats;
9636
9637         stats->rx_packets = old_stats->rx_packets +
9638                 get_stat64(&hw_stats->rx_ucast_packets) +
9639                 get_stat64(&hw_stats->rx_mcast_packets) +
9640                 get_stat64(&hw_stats->rx_bcast_packets);
9641
9642         stats->tx_packets = old_stats->tx_packets +
9643                 get_stat64(&hw_stats->tx_ucast_packets) +
9644                 get_stat64(&hw_stats->tx_mcast_packets) +
9645                 get_stat64(&hw_stats->tx_bcast_packets);
9646
9647         stats->rx_bytes = old_stats->rx_bytes +
9648                 get_stat64(&hw_stats->rx_octets);
9649         stats->tx_bytes = old_stats->tx_bytes +
9650                 get_stat64(&hw_stats->tx_octets);
9651
9652         stats->rx_errors = old_stats->rx_errors +
9653                 get_stat64(&hw_stats->rx_errors);
9654         stats->tx_errors = old_stats->tx_errors +
9655                 get_stat64(&hw_stats->tx_errors) +
9656                 get_stat64(&hw_stats->tx_mac_errors) +
9657                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9658                 get_stat64(&hw_stats->tx_discards);
9659
9660         stats->multicast = old_stats->multicast +
9661                 get_stat64(&hw_stats->rx_mcast_packets);
9662         stats->collisions = old_stats->collisions +
9663                 get_stat64(&hw_stats->tx_collisions);
9664
9665         stats->rx_length_errors = old_stats->rx_length_errors +
9666                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9667                 get_stat64(&hw_stats->rx_undersize_packets);
9668
9669         stats->rx_over_errors = old_stats->rx_over_errors +
9670                 get_stat64(&hw_stats->rxbds_empty);
9671         stats->rx_frame_errors = old_stats->rx_frame_errors +
9672                 get_stat64(&hw_stats->rx_align_errors);
9673         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9674                 get_stat64(&hw_stats->tx_discards);
9675         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9676                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9677
9678         stats->rx_crc_errors = old_stats->rx_crc_errors +
9679                 calc_crc_errors(tp);
9680
9681         stats->rx_missed_errors = old_stats->rx_missed_errors +
9682                 get_stat64(&hw_stats->rx_discards);
9683
9684         stats->rx_dropped = tp->rx_dropped;
9685
9686         return stats;
9687 }
9688
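/* Bitwise CRC-32 using the standard Ethernet polynomial in reflected
 * form (0xedb88320); used below to hash multicast addresses into the
 * 128-bit MAC hash filter.
 */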
9689 static inline u32 calc_crc(unsigned char *buf, int len)
9690 {
9691         u32 reg;
9692         u32 tmp;
9693         int j, k;
9694
9695         reg = 0xffffffff;
9696
9697         for (j = 0; j < len; j++) {
9698                 reg ^= buf[j];
9699
9700                 for (k = 0; k < 8; k++) {
9701                         tmp = reg & 0x01;
9702
9703                         reg >>= 1;
9704
9705                         if (tmp)
9706                                 reg ^= 0xedb88320;
9707                 }
9708         }
9709
9710         return ~reg;
9711 }
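
/* An illustrative sketch, not driver code: calc_crc() is a bitwise
 * little-endian (reflected) CRC-32 using the reversed polynomial
 * 0xedb88320, seeded with all-ones and inverted on return, so it
 * should match ~ether_crc_le(len, buf) from <linux/crc32.h>:
 *
 *      u8 mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *      u32 crc = calc_crc(mac, ETH_ALEN);  // == ~ether_crc_le(ETH_ALEN, mac)
 */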
9712
9713 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9714 {
9715         /* accept or reject all multicast frames */
9716         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9717         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9718         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9719         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9720 }
9721
9722 static void __tg3_set_rx_mode(struct net_device *dev)
9723 {
9724         struct tg3 *tp = netdev_priv(dev);
9725         u32 rx_mode;
9726
9727         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9728                                   RX_MODE_KEEP_VLAN_TAG);
9729
9730 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9731         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9732          * flag clear.
9733          */
9734         if (!tg3_flag(tp, ENABLE_ASF))
9735                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9736 #endif
9737
9738         if (dev->flags & IFF_PROMISC) {
9739                 /* Promiscuous mode. */
9740                 rx_mode |= RX_MODE_PROMISC;
9741         } else if (dev->flags & IFF_ALLMULTI) {
9742                 /* Accept all multicast. */
9743                 tg3_set_multi(tp, 1);
9744         } else if (netdev_mc_empty(dev)) {
9745                 /* Reject all multicast. */
9746                 tg3_set_multi(tp, 0);
9747         } else {
9748                 /* Accept one or more multicast addresses. */
9749                 struct netdev_hw_addr *ha;
9750                 u32 mc_filter[4] = { 0, };
9751                 u32 regidx;
9752                 u32 bit;
9753                 u32 crc;
9754
9755                 netdev_for_each_mc_addr(ha, dev) {
9756                         crc = calc_crc(ha->addr, ETH_ALEN);
9757                         bit = ~crc & 0x7f;
9758                         regidx = (bit & 0x60) >> 5;
9759                         bit &= 0x1f;
9760                         mc_filter[regidx] |= (1 << bit);
9761                 }
9762
9763                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9764                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9765                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9766                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9767         }
9768
9769         if (rx_mode != tp->rx_mode) {
9770                 tp->rx_mode = rx_mode;
9771                 tw32_f(MAC_RX_MODE, rx_mode);
9772                 udelay(10);
9773         }
9774 }
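
/* The four MAC_HASH_REG_x registers form one 128-bit multicast hash
 * filter; tg3_set_multi() writes all-ones to accept every hash.  A
 * sketch of how a single address lands in the filter, mirroring the
 * loop in __tg3_set_rx_mode() above:
 *
 *      u32 crc    = calc_crc(ha->addr, ETH_ALEN);
 *      u32 index  = ~crc & 0x7f;           // 7-bit hash index, 0..127
 *      u32 regidx = (index & 0x60) >> 5;   // selects MAC_HASH_REG_0..3
 *      u32 bit    = index & 0x1f;          // bit within that register
 *      mc_filter[regidx] |= 1 << bit;
 */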
9775
9776 static void tg3_set_rx_mode(struct net_device *dev)
9777 {
9778         struct tg3 *tp = netdev_priv(dev);
9779
9780         if (!netif_running(dev))
9781                 return;
9782
9783         tg3_full_lock(tp, 0);
9784         __tg3_set_rx_mode(dev);
9785         tg3_full_unlock(tp);
9786 }
9787
9788 static int tg3_get_regs_len(struct net_device *dev)
9789 {
9790         return TG3_REG_BLK_SIZE;
9791 }
9792
9793 static void tg3_get_regs(struct net_device *dev,
9794                 struct ethtool_regs *regs, void *_p)
9795 {
9796         struct tg3 *tp = netdev_priv(dev);
9797
9798         regs->version = 0;
9799
9800         memset(_p, 0, TG3_REG_BLK_SIZE);
9801
9802         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9803                 return;
9804
9805         tg3_full_lock(tp, 0);
9806
9807         tg3_dump_legacy_regs(tp, (u32 *)_p);
9808
9809         tg3_full_unlock(tp);
9810 }
9811
9812 static int tg3_get_eeprom_len(struct net_device *dev)
9813 {
9814         struct tg3 *tp = netdev_priv(dev);
9815
9816         return tp->nvram_size;
9817 }
9818
9819 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9820 {
9821         struct tg3 *tp = netdev_priv(dev);
9822         int ret;
9823         u8  *pd;
9824         u32 i, offset, len, b_offset, b_count;
9825         __be32 val;
9826
9827         if (tg3_flag(tp, NO_NVRAM))
9828                 return -EINVAL;
9829
9830         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9831                 return -EAGAIN;
9832
9833         offset = eeprom->offset;
9834         len = eeprom->len;
9835         eeprom->len = 0;
9836
9837         eeprom->magic = TG3_EEPROM_MAGIC;
9838
9839         if (offset & 3) {
9840                 /* adjustments to start on required 4 byte boundary */
9841                 b_offset = offset & 3;
9842                 b_count = 4 - b_offset;
9843                 if (b_count > len) {
9844                         /* i.e. offset=1 len=2 */
9845                         b_count = len;
9846                 }
9847                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9848                 if (ret)
9849                         return ret;
9850                 memcpy(data, ((char *)&val) + b_offset, b_count);
9851                 len -= b_count;
9852                 offset += b_count;
9853                 eeprom->len += b_count;
9854         }
9855
9856         /* read bytes up to the last 4 byte boundary */
9857         pd = &data[eeprom->len];
9858         for (i = 0; i < (len - (len & 3)); i += 4) {
9859                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9860                 if (ret) {
9861                         eeprom->len += i;
9862                         return ret;
9863                 }
9864                 memcpy(pd + i, &val, 4);
9865         }
9866         eeprom->len += i;
9867
9868         if (len & 3) {
9869                 /* read last bytes not ending on 4 byte boundary */
9870                 pd = &data[eeprom->len];
9871                 b_count = len & 3;
9872                 b_offset = offset + len - b_count;
9873                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9874                 if (ret)
9875                         return ret;
9876                 memcpy(pd, &val, b_count);
9877                 eeprom->len += b_count;
9878         }
9879         return 0;
9880 }
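
/* NVRAM is word-addressable, so tg3_get_eeprom() splits a request
 * into an unaligned head, whole 4-byte words, and an unaligned tail.
 * A worked example for offset=1, len=6:
 *
 *      head: read the word at 0, copy bytes 1..3  (b_offset=1, b_count=3)
 *      body: only 3 bytes remain, so no full words are read
 *      tail: read the word at 4, copy bytes 4..6  (b_count=3)
 */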
9881
9882 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9883
9884 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9885 {
9886         struct tg3 *tp = netdev_priv(dev);
9887         int ret;
9888         u32 offset, len, b_offset, odd_len;
9889         u8 *buf;
9890         __be32 start, end;
9891
9892         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9893                 return -EAGAIN;
9894
9895         if (tg3_flag(tp, NO_NVRAM) ||
9896             eeprom->magic != TG3_EEPROM_MAGIC)
9897                 return -EINVAL;
9898
9899         offset = eeprom->offset;
9900         len = eeprom->len;
9901
9902         if ((b_offset = (offset & 3))) {
9903                 /* adjustments to start on required 4 byte boundary */
9904                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9905                 if (ret)
9906                         return ret;
9907                 len += b_offset;
9908                 offset &= ~3;
9909                 if (len < 4)
9910                         len = 4;
9911         }
9912
9913         odd_len = 0;
9914         if (len & 3) {
9915                 /* adjustments to end on required 4 byte boundary */
9916                 odd_len = 1;
9917                 len = (len + 3) & ~3;
9918                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9919                 if (ret)
9920                         return ret;
9921         }
9922
9923         buf = data;
9924         if (b_offset || odd_len) {
9925                 buf = kmalloc(len, GFP_KERNEL);
9926                 if (!buf)
9927                         return -ENOMEM;
9928                 if (b_offset)
9929                         memcpy(buf, &start, 4);
9930                 if (odd_len)
9931                         memcpy(buf+len-4, &end, 4);
9932                 memcpy(buf + b_offset, data, eeprom->len);
9933         }
9934
9935         ret = tg3_nvram_write_block(tp, offset, len, buf);
9936
9937         if (buf != data)
9938                 kfree(buf);
9939
9940         return ret;
9941 }
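
/* tg3_set_eeprom() performs a read-modify-write on whole words.  A
 * worked example for offset=2, len=3: the word at 0 is read into
 * 'start', the word at 4 into 'end', and an 8-byte bounce buffer is
 * assembled as
 *
 *      bytes 0..1  from 'start'  (preserved)
 *      bytes 2..4  caller's data
 *      bytes 5..7  from 'end'    (preserved)
 *
 * before the aligned block is written back in a single pass.
 */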
9942
9943 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9944 {
9945         struct tg3 *tp = netdev_priv(dev);
9946
9947         if (tg3_flag(tp, USE_PHYLIB)) {
9948                 struct phy_device *phydev;
9949                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9950                         return -EAGAIN;
9951                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9952                 return phy_ethtool_gset(phydev, cmd);
9953         }
9954
9955         cmd->supported = (SUPPORTED_Autoneg);
9956
9957         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9958                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9959                                    SUPPORTED_1000baseT_Full);
9960
9961         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9962                 cmd->supported |= (SUPPORTED_100baseT_Half |
9963                                   SUPPORTED_100baseT_Full |
9964                                   SUPPORTED_10baseT_Half |
9965                                   SUPPORTED_10baseT_Full |
9966                                   SUPPORTED_TP);
9967                 cmd->port = PORT_TP;
9968         } else {
9969                 cmd->supported |= SUPPORTED_FIBRE;
9970                 cmd->port = PORT_FIBRE;
9971         }
9972
9973         cmd->advertising = tp->link_config.advertising;
9974         if (tg3_flag(tp, PAUSE_AUTONEG)) {
9975                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
9976                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
9977                                 cmd->advertising |= ADVERTISED_Pause;
9978                         } else {
9979                                 cmd->advertising |= ADVERTISED_Pause |
9980                                                     ADVERTISED_Asym_Pause;
9981                         }
9982                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
9983                         cmd->advertising |= ADVERTISED_Asym_Pause;
9984                 }
9985         }
9986         if (netif_running(dev)) {
9987                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9988                 cmd->duplex = tp->link_config.active_duplex;
9989         } else {
9990                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9991                 cmd->duplex = DUPLEX_INVALID;
9992         }
9993         cmd->phy_address = tp->phy_addr;
9994         cmd->transceiver = XCVR_INTERNAL;
9995         cmd->autoneg = tp->link_config.autoneg;
9996         cmd->maxtxpkt = 0;
9997         cmd->maxrxpkt = 0;
9998         return 0;
9999 }
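
/* The flow-control to advertisement mapping above is the usual IEEE
 * 802.3 (Annex 28B) pause table:
 *
 *      FLOW_CTRL_RX | FLOW_CTRL_TX  ->  Pause
 *      FLOW_CTRL_RX only            ->  Pause | Asym_Pause
 *      FLOW_CTRL_TX only            ->  Asym_Pause
 *      neither                      ->  nothing advertised
 *
 * The same mapping appears again in tg3_set_pauseparam() below.
 */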
10000
10001 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10002 {
10003         struct tg3 *tp = netdev_priv(dev);
10004         u32 speed = ethtool_cmd_speed(cmd);
10005
10006         if (tg3_flag(tp, USE_PHYLIB)) {
10007                 struct phy_device *phydev;
10008                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10009                         return -EAGAIN;
10010                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10011                 return phy_ethtool_sset(phydev, cmd);
10012         }
10013
10014         if (cmd->autoneg != AUTONEG_ENABLE &&
10015             cmd->autoneg != AUTONEG_DISABLE)
10016                 return -EINVAL;
10017
10018         if (cmd->autoneg == AUTONEG_DISABLE &&
10019             cmd->duplex != DUPLEX_FULL &&
10020             cmd->duplex != DUPLEX_HALF)
10021                 return -EINVAL;
10022
10023         if (cmd->autoneg == AUTONEG_ENABLE) {
10024                 u32 mask = ADVERTISED_Autoneg |
10025                            ADVERTISED_Pause |
10026                            ADVERTISED_Asym_Pause;
10027
10028                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10029                         mask |= ADVERTISED_1000baseT_Half |
10030                                 ADVERTISED_1000baseT_Full;
10031
10032                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10033                         mask |= ADVERTISED_100baseT_Half |
10034                                 ADVERTISED_100baseT_Full |
10035                                 ADVERTISED_10baseT_Half |
10036                                 ADVERTISED_10baseT_Full |
10037                                 ADVERTISED_TP;
10038                 else
10039                         mask |= ADVERTISED_FIBRE;
10040
10041                 if (cmd->advertising & ~mask)
10042                         return -EINVAL;
10043
10044                 mask &= (ADVERTISED_1000baseT_Half |
10045                          ADVERTISED_1000baseT_Full |
10046                          ADVERTISED_100baseT_Half |
10047                          ADVERTISED_100baseT_Full |
10048                          ADVERTISED_10baseT_Half |
10049                          ADVERTISED_10baseT_Full);
10050
10051                 cmd->advertising &= mask;
10052         } else {
10053                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10054                         if (speed != SPEED_1000)
10055                                 return -EINVAL;
10056
10057                         if (cmd->duplex != DUPLEX_FULL)
10058                                 return -EINVAL;
10059                 } else {
10060                         if (speed != SPEED_100 &&
10061                             speed != SPEED_10)
10062                                 return -EINVAL;
10063                 }
10064         }
10065
10066         tg3_full_lock(tp, 0);
10067
10068         tp->link_config.autoneg = cmd->autoneg;
10069         if (cmd->autoneg == AUTONEG_ENABLE) {
10070                 tp->link_config.advertising = (cmd->advertising |
10071                                               ADVERTISED_Autoneg);
10072                 tp->link_config.speed = SPEED_INVALID;
10073                 tp->link_config.duplex = DUPLEX_INVALID;
10074         } else {
10075                 tp->link_config.advertising = 0;
10076                 tp->link_config.speed = speed;
10077                 tp->link_config.duplex = cmd->duplex;
10078         }
10079
10080         tp->link_config.orig_speed = tp->link_config.speed;
10081         tp->link_config.orig_duplex = tp->link_config.duplex;
10082         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10083
10084         if (netif_running(dev))
10085                 tg3_setup_phy(tp, 1);
10086
10087         tg3_full_unlock(tp);
10088
10089         return 0;
10090 }
10091
10092 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10093 {
10094         struct tg3 *tp = netdev_priv(dev);
10095
10096         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10097         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10098         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10099         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10100 }
10101
10102 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10103 {
10104         struct tg3 *tp = netdev_priv(dev);
10105
10106         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10107                 wol->supported = WAKE_MAGIC;
10108         else
10109                 wol->supported = 0;
10110         wol->wolopts = 0;
10111         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10112                 wol->wolopts = WAKE_MAGIC;
10113         memset(&wol->sopass, 0, sizeof(wol->sopass));
10114 }
10115
10116 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10117 {
10118         struct tg3 *tp = netdev_priv(dev);
10119         struct device *dp = &tp->pdev->dev;
10120
10121         if (wol->wolopts & ~WAKE_MAGIC)
10122                 return -EINVAL;
10123         if ((wol->wolopts & WAKE_MAGIC) &&
10124             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10125                 return -EINVAL;
10126
10127         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10128
10129         spin_lock_bh(&tp->lock);
10130         if (device_may_wakeup(dp))
10131                 tg3_flag_set(tp, WOL_ENABLE);
10132         else
10133                 tg3_flag_clear(tp, WOL_ENABLE);
10134         spin_unlock_bh(&tp->lock);
10135
10136         return 0;
10137 }
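
/* Only magic-packet wake is supported, so from userspace this pairs
 * with, illustratively:
 *
 *      ethtool -s eth0 wol g     # enable wake-on-magic-packet
 *      ethtool -s eth0 wol d     # disable wake-up
 */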
10138
10139 static u32 tg3_get_msglevel(struct net_device *dev)
10140 {
10141         struct tg3 *tp = netdev_priv(dev);
10142         return tp->msg_enable;
10143 }
10144
10145 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10146 {
10147         struct tg3 *tp = netdev_priv(dev);
10148         tp->msg_enable = value;
10149 }
10150
10151 static int tg3_nway_reset(struct net_device *dev)
10152 {
10153         struct tg3 *tp = netdev_priv(dev);
10154         int r;
10155
10156         if (!netif_running(dev))
10157                 return -EAGAIN;
10158
10159         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10160                 return -EINVAL;
10161
10162         if (tg3_flag(tp, USE_PHYLIB)) {
10163                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10164                         return -EAGAIN;
10165                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10166         } else {
10167                 u32 bmcr;
10168
10169                 spin_lock_bh(&tp->lock);
10170                 r = -EINVAL;
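                /* Read BMCR twice; the first result is discarded,
                 * presumably to flush stale state, and only the second
                 * read's value is checked below.
                 */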
10171                 tg3_readphy(tp, MII_BMCR, &bmcr);
10172                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10173                     ((bmcr & BMCR_ANENABLE) ||
10174                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10175                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10176                                                    BMCR_ANENABLE);
10177                         r = 0;
10178                 }
10179                 spin_unlock_bh(&tp->lock);
10180         }
10181
10182         return r;
10183 }
10184
10185 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10186 {
10187         struct tg3 *tp = netdev_priv(dev);
10188
10189         ering->rx_max_pending = tp->rx_std_ring_mask;
10190         ering->rx_mini_max_pending = 0;
10191         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10192                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10193         else
10194                 ering->rx_jumbo_max_pending = 0;
10195
10196         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10197
10198         ering->rx_pending = tp->rx_pending;
10199         ering->rx_mini_pending = 0;
10200         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10201                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10202         else
10203                 ering->rx_jumbo_pending = 0;
10204
10205         ering->tx_pending = tp->napi[0].tx_pending;
10206 }
10207
10208 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10209 {
10210         struct tg3 *tp = netdev_priv(dev);
10211         int i, irq_sync = 0, err = 0;
10212
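        /* Reject sizes beyond what the rings can index, and TX rings
         * too small to hold one maximally fragmented skb (more than
         * MAX_SKB_FRAGS descriptors; three times that when the
         * TSO_BUG workaround may have to resegment a TSO packet).
         */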
10213         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10214             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10215             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10216             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10217             (tg3_flag(tp, TSO_BUG) &&
10218              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10219                 return -EINVAL;
10220
10221         if (netif_running(dev)) {
10222                 tg3_phy_stop(tp);
10223                 tg3_netif_stop(tp);
10224                 irq_sync = 1;
10225         }
10226
10227         tg3_full_lock(tp, irq_sync);
10228
10229         tp->rx_pending = ering->rx_pending;
10230
10231         if (tg3_flag(tp, MAX_RXPEND_64) &&
10232             tp->rx_pending > 63)
10233                 tp->rx_pending = 63;
10234         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10235
10236         for (i = 0; i < tp->irq_max; i++)
10237                 tp->napi[i].tx_pending = ering->tx_pending;
10238
10239         if (netif_running(dev)) {
10240                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10241                 err = tg3_restart_hw(tp, 1);
10242                 if (!err)
10243                         tg3_netif_start(tp);
10244         }
10245
10246         tg3_full_unlock(tp);
10247
10248         if (irq_sync && !err)
10249                 tg3_phy_start(tp);
10250
10251         return err;
10252 }
10253
10254 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10255 {
10256         struct tg3 *tp = netdev_priv(dev);
10257
10258         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10259
10260         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10261                 epause->rx_pause = 1;
10262         else
10263                 epause->rx_pause = 0;
10264
10265         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10266                 epause->tx_pause = 1;
10267         else
10268                 epause->tx_pause = 0;
10269 }
10270
10271 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10272 {
10273         struct tg3 *tp = netdev_priv(dev);
10274         int err = 0;
10275
10276         if (tg3_flag(tp, USE_PHYLIB)) {
10277                 u32 newadv;
10278                 struct phy_device *phydev;
10279
10280                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10281
10282                 if (!(phydev->supported & SUPPORTED_Pause) ||
10283                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10284                      (epause->rx_pause != epause->tx_pause)))
10285                         return -EINVAL;
10286
10287                 tp->link_config.flowctrl = 0;
10288                 if (epause->rx_pause) {
10289                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10290
10291                         if (epause->tx_pause) {
10292                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10293                                 newadv = ADVERTISED_Pause;
10294                         } else
10295                                 newadv = ADVERTISED_Pause |
10296                                          ADVERTISED_Asym_Pause;
10297                 } else if (epause->tx_pause) {
10298                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10299                         newadv = ADVERTISED_Asym_Pause;
10300                 } else
10301                         newadv = 0;
10302
10303                 if (epause->autoneg)
10304                         tg3_flag_set(tp, PAUSE_AUTONEG);
10305                 else
10306                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10307
10308                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10309                         u32 oldadv = phydev->advertising &
10310                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10311                         if (oldadv != newadv) {
10312                                 phydev->advertising &=
10313                                         ~(ADVERTISED_Pause |
10314                                           ADVERTISED_Asym_Pause);
10315                                 phydev->advertising |= newadv;
10316                                 if (phydev->autoneg) {
10317                                         /*
10318                                          * Always renegotiate the link to
10319                                          * inform our link partner of our
10320                                          * flow control settings, even if the
10321                                          * flow control is forced.  Let
10322                                          * tg3_adjust_link() do the final
10323                                          * flow control setup.
10324                                          */
10325                                         return phy_start_aneg(phydev);
10326                                 }
10327                         }
10328
10329                         if (!epause->autoneg)
10330                                 tg3_setup_flow_control(tp, 0, 0);
10331                 } else {
10332                         tp->link_config.orig_advertising &=
10333                                         ~(ADVERTISED_Pause |
10334                                           ADVERTISED_Asym_Pause);
10335                         tp->link_config.orig_advertising |= newadv;
10336                 }
10337         } else {
10338                 int irq_sync = 0;
10339
10340                 if (netif_running(dev)) {
10341                         tg3_netif_stop(tp);
10342                         irq_sync = 1;
10343                 }
10344
10345                 tg3_full_lock(tp, irq_sync);
10346
10347                 if (epause->autoneg)
10348                         tg3_flag_set(tp, PAUSE_AUTONEG);
10349                 else
10350                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10351                 if (epause->rx_pause)
10352                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10353                 else
10354                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10355                 if (epause->tx_pause)
10356                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10357                 else
10358                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10359
10360                 if (netif_running(dev)) {
10361                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10362                         err = tg3_restart_hw(tp, 1);
10363                         if (!err)
10364                                 tg3_netif_start(tp);
10365                 }
10366
10367                 tg3_full_unlock(tp);
10368         }
10369
10370         return err;
10371 }
10372
10373 static int tg3_get_sset_count(struct net_device *dev, int sset)
10374 {
10375         switch (sset) {
10376         case ETH_SS_TEST:
10377                 return TG3_NUM_TEST;
10378         case ETH_SS_STATS:
10379                 return TG3_NUM_STATS;
10380         default:
10381                 return -EOPNOTSUPP;
10382         }
10383 }
10384
10385 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10386 {
10387         switch (stringset) {
10388         case ETH_SS_STATS:
10389                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10390                 break;
10391         case ETH_SS_TEST:
10392                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10393                 break;
10394         default:
10395                 WARN(1, "unknown string set %u\n", stringset);
10396                 break;
10397         }
10398 }
10399
10400 static int tg3_set_phys_id(struct net_device *dev,
10401                             enum ethtool_phys_id_state state)
10402 {
10403         struct tg3 *tp = netdev_priv(dev);
10404
10405         if (!netif_running(tp->dev))
10406                 return -EAGAIN;
10407
10408         switch (state) {
10409         case ETHTOOL_ID_ACTIVE:
10410                 return 1;       /* cycle on/off once per second */
10411
10412         case ETHTOOL_ID_ON:
10413                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10414                      LED_CTRL_1000MBPS_ON |
10415                      LED_CTRL_100MBPS_ON |
10416                      LED_CTRL_10MBPS_ON |
10417                      LED_CTRL_TRAFFIC_OVERRIDE |
10418                      LED_CTRL_TRAFFIC_BLINK |
10419                      LED_CTRL_TRAFFIC_LED);
10420                 break;
10421
10422         case ETHTOOL_ID_OFF:
10423                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10424                      LED_CTRL_TRAFFIC_OVERRIDE);
10425                 break;
10426
10427         case ETHTOOL_ID_INACTIVE:
10428                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10429                 break;
10430         }
10431
10432         return 0;
10433 }
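
/* This is the 'ethtool -p' (--identify) hook.  The ethtool core calls
 * ETHTOOL_ID_ACTIVE first; returning 1 asks the core to alternate
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF at one on/off cycle per second, e.g.:
 *
 *      ethtool -p eth0 5     # blink the port LED for 5 seconds
 */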
10434
10435 static void tg3_get_ethtool_stats(struct net_device *dev,
10436                                    struct ethtool_stats *estats, u64 *tmp_stats)
10437 {
10438         struct tg3 *tp = netdev_priv(dev);
10439         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10440 }
10441
10442 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10443 {
10444         int i;
10445         __be32 *buf;
10446         u32 offset = 0, len = 0;
10447         u32 magic, val;
10448
10449         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10450                 return NULL;
10451
10452         if (magic == TG3_EEPROM_MAGIC) {
10453                 for (offset = TG3_NVM_DIR_START;
10454                      offset < TG3_NVM_DIR_END;
10455                      offset += TG3_NVM_DIRENT_SIZE) {
10456                         if (tg3_nvram_read(tp, offset, &val))
10457                                 return NULL;
10458
10459                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10460                             TG3_NVM_DIRTYPE_EXTVPD)
10461                                 break;
10462                 }
10463
10464                 if (offset != TG3_NVM_DIR_END) {
10465                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10466                         if (tg3_nvram_read(tp, offset + 4, &offset))
10467                                 return NULL;
10468
10469                         offset = tg3_nvram_logical_addr(tp, offset);
10470                 }
10471         }
10472
10473         if (!offset || !len) {
10474                 offset = TG3_NVM_VPD_OFF;
10475                 len = TG3_NVM_VPD_LEN;
10476         }
10477
10478         buf = kmalloc(len, GFP_KERNEL);
10479         if (buf == NULL)
10480                 return NULL;
10481
10482         if (magic == TG3_EEPROM_MAGIC) {
10483                 for (i = 0; i < len; i += 4) {
10484                         /* The data is in little-endian format in NVRAM.
10485                          * Use the big-endian read routines to preserve
10486                          * the byte order as it exists in NVRAM.
10487                          */
10488                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10489                                 goto error;
10490                 }
10491         } else {
10492                 u8 *ptr;
10493                 ssize_t cnt;
10494                 unsigned int pos = 0;
10495
10496                 ptr = (u8 *)&buf[0];
10497                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10498                         cnt = pci_read_vpd(tp->pdev, pos,
10499                                            len - pos, ptr);
10500                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10501                                 cnt = 0;
10502                         else if (cnt < 0)
10503                                 goto error;
10504                 }
10505                 if (pos != len)
10506                         goto error;
10507         }
10508
10509         return buf;
10510
10511 error:
10512         kfree(buf);
10513         return NULL;
10514 }
10515
10516 #define NVRAM_TEST_SIZE 0x100
10517 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10518 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10519 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10520 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10521 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10522 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x4c
10523 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10524 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10525
10526 static int tg3_test_nvram(struct tg3 *tp)
10527 {
10528         u32 csum, magic;
10529         __be32 *buf;
10530         int i, j, k, err = 0, size;
10531
10532         if (tg3_flag(tp, NO_NVRAM))
10533                 return 0;
10534
10535         if (tg3_nvram_read(tp, 0, &magic) != 0)
10536                 return -EIO;
10537
10538         if (magic == TG3_EEPROM_MAGIC)
10539                 size = NVRAM_TEST_SIZE;
10540         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10541                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10542                     TG3_EEPROM_SB_FORMAT_1) {
10543                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10544                         case TG3_EEPROM_SB_REVISION_0:
10545                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10546                                 break;
10547                         case TG3_EEPROM_SB_REVISION_2:
10548                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10549                                 break;
10550                         case TG3_EEPROM_SB_REVISION_3:
10551                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10552                                 break;
10553                         case TG3_EEPROM_SB_REVISION_4:
10554                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10555                                 break;
10556                         case TG3_EEPROM_SB_REVISION_5:
10557                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10558                                 break;
10559                         case TG3_EEPROM_SB_REVISION_6:
10560                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10561                                 break;
10562                         default:
10563                                 return -EIO;
10564                         }
10565                 } else
10566                         return 0;
10567         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10568                 size = NVRAM_SELFBOOT_HW_SIZE;
10569         else
10570                 return -EIO;
10571
10572         buf = kmalloc(size, GFP_KERNEL);
10573         if (buf == NULL)
10574                 return -ENOMEM;
10575
10576         err = -EIO;
10577         for (i = 0, j = 0; i < size; i += 4, j++) {
10578                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10579                 if (err)
10580                         break;
10581         }
10582         if (i < size)
10583                 goto out;
10584
10585         /* Selfboot format */
10586         magic = be32_to_cpu(buf[0]);
10587         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10588             TG3_EEPROM_MAGIC_FW) {
10589                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10590
10591                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10592                     TG3_EEPROM_SB_REVISION_2) {
10593                         /* For rev 2, the csum doesn't include the MBA. */
10594                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10595                                 csum8 += buf8[i];
10596                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10597                                 csum8 += buf8[i];
10598                 } else {
10599                         for (i = 0; i < size; i++)
10600                                 csum8 += buf8[i];
10601                 }
10602
10603                 if (csum8 == 0) {
10604                         err = 0;
10605                         goto out;
10606                 }
10607
10608                 err = -EIO;
10609                 goto out;
10610         }
10611
10612         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10613             TG3_EEPROM_MAGIC_HW) {
10614                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10615                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10616                 u8 *buf8 = (u8 *) buf;
10617
10618                 /* Separate the parity bits and the data bytes.  */
10619                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10620                         if ((i == 0) || (i == 8)) {
10621                                 int l;
10622                                 u8 msk;
10623
10624                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10625                                         parity[k++] = buf8[i] & msk;
10626                                 i++;
10627                         } else if (i == 16) {
10628                                 int l;
10629                                 u8 msk;
10630
10631                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10632                                         parity[k++] = buf8[i] & msk;
10633                                 i++;
10634
10635                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10636                                         parity[k++] = buf8[i] & msk;
10637                                 i++;
10638                         }
10639                         data[j++] = buf8[i];
10640                 }
10641
10642                 err = -EIO;
10643                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10644                         u8 hw8 = hweight8(data[i]);
10645
10646                         if ((hw8 & 0x1) && parity[i])
10647                                 goto out;
10648                         else if (!(hw8 & 0x1) && !parity[i])
10649                                 goto out;
10650                 }
10651                 err = 0;
10652                 goto out;
10653         }
10654
10655         err = -EIO;
10656
10657         /* Bootstrap checksum at offset 0x10 */
10658         csum = calc_crc((unsigned char *) buf, 0x10);
10659         if (csum != le32_to_cpu(buf[0x10/4]))
10660                 goto out;
10661
10662         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10663         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10664         if (csum != le32_to_cpu(buf[0xfc/4]))
10665                 goto out;
10666
10667         kfree(buf);
10668
10669         buf = tg3_vpd_readblock(tp);
10670         if (!buf)
10671                 return -ENOMEM;
10672
10673         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10674                              PCI_VPD_LRDT_RO_DATA);
10675         if (i > 0) {
10676                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10677                 if (j < 0)
10678                         goto out;
10679
10680                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10681                         goto out;
10682
10683                 i += PCI_VPD_LRDT_TAG_SIZE;
10684                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10685                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10686                 if (j > 0) {
10687                         u8 csum8 = 0;
10688
10689                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10690
10691                         for (i = 0; i <= j; i++)
10692                                 csum8 += ((u8 *)buf)[i];
10693
10694                         if (csum8)
10695                                 goto out;
10696                 }
10697         }
10698
10699         err = 0;
10700
10701 out:
10702         kfree(buf);
10703         return err;
10704 }
10705
10706 #define TG3_SERDES_TIMEOUT_SEC  2
10707 #define TG3_COPPER_TIMEOUT_SEC  6
10708
10709 static int tg3_test_link(struct tg3 *tp)
10710 {
10711         int i, max;
10712
10713         if (!netif_running(tp->dev))
10714                 return -ENODEV;
10715
10716         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10717                 max = TG3_SERDES_TIMEOUT_SEC;
10718         else
10719                 max = TG3_COPPER_TIMEOUT_SEC;
10720
10721         for (i = 0; i < max; i++) {
10722                 if (netif_carrier_ok(tp->dev))
10723                         return 0;
10724
10725                 if (msleep_interruptible(1000))
10726                         break;
10727         }
10728
10729         return -EIO;
10730 }
10731
10732 /* Only test the commonly used registers */
10733 static int tg3_test_registers(struct tg3 *tp)
10734 {
10735         int i, is_5705, is_5750;
10736         u32 offset, read_mask, write_mask, val, save_val, read_val;
10737         static struct {
10738                 u16 offset;
10739                 u16 flags;
10740 #define TG3_FL_5705     0x1
10741 #define TG3_FL_NOT_5705 0x2
10742 #define TG3_FL_NOT_5788 0x4
10743 #define TG3_FL_NOT_5750 0x8
10744                 u32 read_mask;
10745                 u32 write_mask;
10746         } reg_tbl[] = {
10747                 /* MAC Control Registers */
10748                 { MAC_MODE, TG3_FL_NOT_5705,
10749                         0x00000000, 0x00ef6f8c },
10750                 { MAC_MODE, TG3_FL_5705,
10751                         0x00000000, 0x01ef6b8c },
10752                 { MAC_STATUS, TG3_FL_NOT_5705,
10753                         0x03800107, 0x00000000 },
10754                 { MAC_STATUS, TG3_FL_5705,
10755                         0x03800100, 0x00000000 },
10756                 { MAC_ADDR_0_HIGH, 0x0000,
10757                         0x00000000, 0x0000ffff },
10758                 { MAC_ADDR_0_LOW, 0x0000,
10759                         0x00000000, 0xffffffff },
10760                 { MAC_RX_MTU_SIZE, 0x0000,
10761                         0x00000000, 0x0000ffff },
10762                 { MAC_TX_MODE, 0x0000,
10763                         0x00000000, 0x00000070 },
10764                 { MAC_TX_LENGTHS, 0x0000,
10765                         0x00000000, 0x00003fff },
10766                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10767                         0x00000000, 0x000007fc },
10768                 { MAC_RX_MODE, TG3_FL_5705,
10769                         0x00000000, 0x000007dc },
10770                 { MAC_HASH_REG_0, 0x0000,
10771                         0x00000000, 0xffffffff },
10772                 { MAC_HASH_REG_1, 0x0000,
10773                         0x00000000, 0xffffffff },
10774                 { MAC_HASH_REG_2, 0x0000,
10775                         0x00000000, 0xffffffff },
10776                 { MAC_HASH_REG_3, 0x0000,
10777                         0x00000000, 0xffffffff },
10778
10779                 /* Receive Data and Receive BD Initiator Control Registers. */
10780                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10781                         0x00000000, 0xffffffff },
10782                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10783                         0x00000000, 0xffffffff },
10784                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10785                         0x00000000, 0x00000003 },
10786                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10787                         0x00000000, 0xffffffff },
10788                 { RCVDBDI_STD_BD+0, 0x0000,
10789                         0x00000000, 0xffffffff },
10790                 { RCVDBDI_STD_BD+4, 0x0000,
10791                         0x00000000, 0xffffffff },
10792                 { RCVDBDI_STD_BD+8, 0x0000,
10793                         0x00000000, 0xffff0002 },
10794                 { RCVDBDI_STD_BD+0xc, 0x0000,
10795                         0x00000000, 0xffffffff },
10796
10797                 /* Receive BD Initiator Control Registers. */
10798                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10799                         0x00000000, 0xffffffff },
10800                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10801                         0x00000000, 0x000003ff },
10802                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10803                         0x00000000, 0xffffffff },
10804
10805                 /* Host Coalescing Control Registers. */
10806                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10807                         0x00000000, 0x00000004 },
10808                 { HOSTCC_MODE, TG3_FL_5705,
10809                         0x00000000, 0x000000f6 },
10810                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10811                         0x00000000, 0xffffffff },
10812                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10813                         0x00000000, 0x000003ff },
10814                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10815                         0x00000000, 0xffffffff },
10816                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10817                         0x00000000, 0x000003ff },
10818                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10819                         0x00000000, 0xffffffff },
10820                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10821                         0x00000000, 0x000000ff },
10822                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10823                         0x00000000, 0xffffffff },
10824                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10825                         0x00000000, 0x000000ff },
10826                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10827                         0x00000000, 0xffffffff },
10828                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10829                         0x00000000, 0xffffffff },
10830                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10831                         0x00000000, 0xffffffff },
10832                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10833                         0x00000000, 0x000000ff },
10834                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10835                         0x00000000, 0xffffffff },
10836                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10837                         0x00000000, 0x000000ff },
10838                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10839                         0x00000000, 0xffffffff },
10840                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10841                         0x00000000, 0xffffffff },
10842                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10843                         0x00000000, 0xffffffff },
10844                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10845                         0x00000000, 0xffffffff },
10846                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10847                         0x00000000, 0xffffffff },
10848                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10849                         0xffffffff, 0x00000000 },
10850                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10851                         0xffffffff, 0x00000000 },
10852
10853                 /* Buffer Manager Control Registers. */
10854                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10855                         0x00000000, 0x007fff80 },
10856                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10857                         0x00000000, 0x007fffff },
10858                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10859                         0x00000000, 0x0000003f },
10860                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10861                         0x00000000, 0x000001ff },
10862                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10863                         0x00000000, 0x000001ff },
10864                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10865                         0xffffffff, 0x00000000 },
10866                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10867                         0xffffffff, 0x00000000 },
10868
10869                 /* Mailbox Registers */
10870                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10871                         0x00000000, 0x000001ff },
10872                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10873                         0x00000000, 0x000001ff },
10874                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10875                         0x00000000, 0x000007ff },
10876                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10877                         0x00000000, 0x000001ff },
10878
10879                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10880         };
10881
10882         is_5705 = is_5750 = 0;
10883         if (tg3_flag(tp, 5705_PLUS)) {
10884                 is_5705 = 1;
10885                 if (tg3_flag(tp, 5750_PLUS))
10886                         is_5750 = 1;
10887         }
10888
10889         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10890                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10891                         continue;
10892
10893                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10894                         continue;
10895
10896                 if (tg3_flag(tp, IS_5788) &&
10897                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
10898                         continue;
10899
10900                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10901                         continue;
10902
10903                 offset = (u32) reg_tbl[i].offset;
10904                 read_mask = reg_tbl[i].read_mask;
10905                 write_mask = reg_tbl[i].write_mask;
10906
10907                 /* Save the original register content */
10908                 save_val = tr32(offset);
10909
10910                 /* Determine the read-only value. */
10911                 read_val = save_val & read_mask;
10912
10913                 /* Write zero to the register, then make sure the read-only bits
10914                  * are not changed and the read/write bits are all zeros.
10915                  */
10916                 tw32(offset, 0);
10917
10918                 val = tr32(offset);
10919
10920                 /* Test the read-only and read/write bits. */
10921                 if (((val & read_mask) != read_val) || (val & write_mask))
10922                         goto out;
10923
10924                 /* Write ones to all the bits defined by RdMask and WrMask, then
10925                  * make sure the read-only bits are not changed and the
10926                  * read/write bits are all ones.
10927                  */
10928                 tw32(offset, read_mask | write_mask);
10929
10930                 val = tr32(offset);
10931
10932                 /* Test the read-only bits. */
10933                 if ((val & read_mask) != read_val)
10934                         goto out;
10935
10936                 /* Test the read/write bits. */
10937                 if ((val & write_mask) != write_mask)
10938                         goto out;
10939
10940                 tw32(offset, save_val);
10941         }
10942
10943         return 0;
10944
10945 out:
10946         if (netif_msg_hw(tp))
10947                 netdev_err(tp->dev,
10948                            "Register test failed at offset %x\n", offset);
10949         tw32(offset, save_val);
10950         return -EIO;
10951 }
10952
10953 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10954 {
10955         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10956         int i;
10957         u32 j;
10958
10959         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10960                 for (j = 0; j < len; j += 4) {
10961                         u32 val;
10962
10963                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10964                         tg3_read_mem(tp, offset + j, &val);
10965                         if (val != test_pattern[i])
10966                                 return -EIO;
10967                 }
10968         }
10969         return 0;
10970 }
10971
10972 static int tg3_test_memory(struct tg3 *tp)
10973 {
10974         static struct mem_entry {
10975                 u32 offset;
10976                 u32 len;
10977         } mem_tbl_570x[] = {
10978                 { 0x00000000, 0x00b50},
10979                 { 0x00002000, 0x1c000},
10980                 { 0xffffffff, 0x00000}
10981         }, mem_tbl_5705[] = {
10982                 { 0x00000100, 0x0000c},
10983                 { 0x00000200, 0x00008},
10984                 { 0x00004000, 0x00800},
10985                 { 0x00006000, 0x01000},
10986                 { 0x00008000, 0x02000},
10987                 { 0x00010000, 0x0e000},
10988                 { 0xffffffff, 0x00000}
10989         }, mem_tbl_5755[] = {
10990                 { 0x00000200, 0x00008},
10991                 { 0x00004000, 0x00800},
10992                 { 0x00006000, 0x00800},
10993                 { 0x00008000, 0x02000},
10994                 { 0x00010000, 0x0c000},
10995                 { 0xffffffff, 0x00000}
10996         }, mem_tbl_5906[] = {
10997                 { 0x00000200, 0x00008},
10998                 { 0x00004000, 0x00400},
10999                 { 0x00006000, 0x00400},
11000                 { 0x00008000, 0x01000},
11001                 { 0x00010000, 0x01000},
11002                 { 0xffffffff, 0x00000}
11003         }, mem_tbl_5717[] = {
11004                 { 0x00000200, 0x00008},
11005                 { 0x00010000, 0x0a000},
11006                 { 0x00020000, 0x13c00},
11007                 { 0xffffffff, 0x00000}
11008         }, mem_tbl_57765[] = {
11009                 { 0x00000200, 0x00008},
11010                 { 0x00004000, 0x00800},
11011                 { 0x00006000, 0x09800},
11012                 { 0x00010000, 0x0a000},
11013                 { 0xffffffff, 0x00000}
11014         };
11015         struct mem_entry *mem_tbl;
11016         int err = 0;
11017         int i;
11018
11019         if (tg3_flag(tp, 5717_PLUS))
11020                 mem_tbl = mem_tbl_5717;
11021         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11022                 mem_tbl = mem_tbl_57765;
11023         else if (tg3_flag(tp, 5755_PLUS))
11024                 mem_tbl = mem_tbl_5755;
11025         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11026                 mem_tbl = mem_tbl_5906;
11027         else if (tg3_flag(tp, 5705_PLUS))
11028                 mem_tbl = mem_tbl_5705;
11029         else
11030                 mem_tbl = mem_tbl_570x;
11031
11032         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11033                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11034                 if (err)
11035                         break;
11036         }
11037
11038         return err;
11039 }
11040
11041 #define TG3_MAC_LOOPBACK        0
11042 #define TG3_PHY_LOOPBACK        1
11043 #define TG3_TSO_LOOPBACK        2
11044
11045 #define TG3_TSO_MSS             500
11046
11047 #define TG3_TSO_IP_HDR_LEN      20
11048 #define TG3_TSO_TCP_HDR_LEN     20
11049 #define TG3_TSO_TCP_OPT_LEN     12
11050
11051 static const u8 tg3_tso_header[] = {
11052 0x08, 0x00,
11053 0x45, 0x00, 0x00, 0x00,
11054 0x00, 0x00, 0x40, 0x00,
11055 0x40, 0x06, 0x00, 0x00,
11056 0x0a, 0x00, 0x00, 0x01,
11057 0x0a, 0x00, 0x00, 0x02,
11058 0x0d, 0x00, 0xe0, 0x00,
11059 0x00, 0x00, 0x01, 0x00,
11060 0x00, 0x00, 0x02, 0x00,
11061 0x80, 0x10, 0x10, 0x00,
11062 0x14, 0x09, 0x00, 0x00,
11063 0x01, 0x01, 0x08, 0x0a,
11064 0x11, 0x11, 0x11, 0x11,
11065 0x11, 0x11, 0x11, 0x11,
11066 };
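
/* Decoded, the template above is a fixed IPv4/TCP header for the TSO
 * loopback test (lengths and checksums are patched at run time):
 *
 *      08 00        ethertype IPv4
 *      45 ...       IPv4: IHL=5 (20 bytes), DF set, TTL 64, proto 6
 *                   (TCP), 10.0.0.1 -> 10.0.0.2
 *      ...80 10     TCP: data offset 8 (20 bytes + 12 bytes of
 *                   options), ACK set, options NOP/NOP/timestamp
 */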
11067
11068 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11069 {
11070         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11071         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11072         struct sk_buff *skb, *rx_skb;
11073         u8 *tx_data;
11074         dma_addr_t map;
11075         int num_pkts, tx_len, rx_len, i, err;
11076         struct tg3_rx_buffer_desc *desc;
11077         struct tg3_napi *tnapi, *rnapi;
11078         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11079
11080         tnapi = &tp->napi[0];
11081         rnapi = &tp->napi[0];
11082         if (tp->irq_cnt > 1) {
11083                 if (tg3_flag(tp, ENABLE_RSS))
11084                         rnapi = &tp->napi[1];
11085                 if (tg3_flag(tp, ENABLE_TSS))
11086                         tnapi = &tp->napi[1];
11087         }
11088         coal_now = tnapi->coal_now | rnapi->coal_now;
11089
11090         if (loopback_mode == TG3_MAC_LOOPBACK) {
11091                 /* HW errata - MAC loopback fails in some cases on 5780.
11092                  * Normal traffic and PHY loopback are not affected by
11093                  * the errata.  Also, the MAC loopback test is deprecated
11094                  * for all newer ASIC revisions.
11095                  */
11096                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11097                     tg3_flag(tp, CPMU_PRESENT))
11098                         return 0;
11099
11100                 mac_mode = tp->mac_mode &
11101                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11102                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11103                 if (!tg3_flag(tp, 5705_PLUS))
11104                         mac_mode |= MAC_MODE_LINK_POLARITY;
11105                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11106                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11107                 else
11108                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11109                 tw32(MAC_MODE, mac_mode);
11110         } else {
11111                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11112                         tg3_phy_fet_toggle_apd(tp, false);
11113                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11114                 } else
11115                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11116
11117                 tg3_phy_toggle_automdix(tp, 0);
11118
11119                 tg3_writephy(tp, MII_BMCR, val);
11120                 udelay(40);
11121
11122                 mac_mode = tp->mac_mode &
11123                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11124                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11125                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11126                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11127                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11128                         /* The write needs to be flushed for the AC131 */
11129                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11130                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11131                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11132                 } else
11133                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11134
11135                 /* reset to prevent losing 1st rx packet intermittently */
11136                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11137                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11138                         udelay(10);
11139                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11140                 }
11141                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11142                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11143                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11144                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11145                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11146                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11147                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11148                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11149                 }
11150                 tw32(MAC_MODE, mac_mode);
11151
11152                 /* Wait up to 100 ms for the forced link to come up */
11153                 for (i = 0; i < 100; i++) {
11154                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11155                                 break;
11156                         mdelay(1);
11157                 }
11158         }
11159
11160         err = -EIO;
11161
11162         tx_len = pktsz;
11163         skb = netdev_alloc_skb(tp->dev, tx_len);
11164         if (!skb)
11165                 return -ENOMEM;
11166
11167         tx_data = skb_put(skb, tx_len);
11168         memcpy(tx_data, tp->dev->dev_addr, 6);
11169         memset(tx_data + 6, 0x0, 8);
11170
11171         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11172
11173         if (loopback_mode == TG3_TSO_LOOPBACK) {
11174                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11175
11176                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11177                               TG3_TSO_TCP_OPT_LEN;
11178
11179                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11180                        sizeof(tg3_tso_header));
11181                 mss = TG3_TSO_MSS;
11182
11183                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11184                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11185
11186                 /* Set the total length field in the IP header */
11187                 iph->tot_len = htons((u16)(mss + hdr_len));
11188
11189                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11190                               TXD_FLAG_CPU_POST_DMA);
11191
11192                 if (tg3_flag(tp, HW_TSO_1) ||
11193                     tg3_flag(tp, HW_TSO_2) ||
11194                     tg3_flag(tp, HW_TSO_3)) {
11195                         struct tcphdr *th;
11196                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11197                         th = (struct tcphdr *)&tx_data[val];
11198                         th->check = 0;
11199                 } else
11200                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11201
11202                 if (tg3_flag(tp, HW_TSO_3)) {
11203                         mss |= (hdr_len & 0xc) << 12;
11204                         if (hdr_len & 0x10)
11205                                 base_flags |= 0x00000010;
11206                         base_flags |= (hdr_len & 0x3e0) << 5;
11207                 } else if (tg3_flag(tp, HW_TSO_2))
11208                         mss |= hdr_len << 9;
11209                 else if (tg3_flag(tp, HW_TSO_1) ||
11210                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11211                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11212                 } else {
11213                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11214                 }
11215
11216                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11217         } else {
11218                 num_pkts = 1;
11219                 data_off = ETH_HLEN;
11220         }
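        /* Worked numbers for the encoding above, for the ETH_FRAME_LEN
         * (1514-byte) TSO pass: hdr_len = 20 + 20 + 12 = 52 (0x34), the
         * TCP payload is 1514 - 12 - 54 = 1448 bytes, and num_pkts =
         * DIV_ROUND_UP(1448, 500) = 3 segments.  With HW_TSO_3, hdr_len
         * is scattered across the descriptor as mss |= 0x4000 plus
         * base_flags |= 0x10 | 0x400; with HW_TSO_2 it rides whole as
         * mss |= 52 << 9.
         */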
11221
11222         for (i = data_off; i < tx_len; i++)
11223                 tx_data[i] = (u8) (i & 0xff);
11224
11225         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11226         if (pci_dma_mapping_error(tp->pdev, map)) {
11227                 dev_kfree_skb(skb);
11228                 return -EIO;
11229         }
11230
11231         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11232                rnapi->coal_now);
11233
11234         udelay(10);
11235
11236         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11237
11238         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11239                     base_flags, (mss << 1) | 1);
11240
11241         tnapi->tx_prod++;
11242
11243         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11244         tr32_mailbox(tnapi->prodmbox);
11245
11246         udelay(10);
11247
11248         /* Poll up to 350 usec; some 10/100 Mbps devices need that long. */
11249         for (i = 0; i < 35; i++) {
11250                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11251                        coal_now);
11252
11253                 udelay(10);
11254
11255                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11256                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11257                 if ((tx_idx == tnapi->tx_prod) &&
11258                     (rx_idx == (rx_start_idx + num_pkts)))
11259                         break;
11260         }
11261
11262         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11263         dev_kfree_skb(skb);
11264
11265         if (tx_idx != tnapi->tx_prod)
11266                 goto out;
11267
11268         if (rx_idx != rx_start_idx + num_pkts)
11269                 goto out;
11270
11271         val = data_off;
11272         while (rx_idx != rx_start_idx) {
11273                 desc = &rnapi->rx_rcb[rx_start_idx++];
11274                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11275                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11276
11277                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11278                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11279                         goto out;
11280
11281                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11282                          - ETH_FCS_LEN;
11283
11284                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11285                         if (rx_len != tx_len)
11286                                 goto out;
11287
11288                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11289                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11290                                         goto out;
11291                         } else {
11292                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11293                                         goto out;
11294                         }
11295                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11296                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11297                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11298                         goto out;
11299                 }
11300
11301                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11302                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11303                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11304                                              mapping);
11305                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11306                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11307                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11308                                              mapping);
11309                 } else
11310                         goto out;
11311
11312                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11313                                             PCI_DMA_FROMDEVICE);
11314
11315                 for (i = data_off; i < rx_len; i++, val++) {
11316                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11317                                 goto out;
11318                 }
11319         }
11320
11321         err = 0;
11322
11323         /* tg3_free_rings will unmap and free the rx_skb */
11324 out:
11325         return err;
11326 }
11327
11328 #define TG3_STD_LOOPBACK_FAILED         1
11329 #define TG3_JMB_LOOPBACK_FAILED         2
11330 #define TG3_TSO_LOOPBACK_FAILED         4
11331
11332 #define TG3_MAC_LOOPBACK_SHIFT          0
11333 #define TG3_PHY_LOOPBACK_SHIFT          4
11334 #define TG3_LOOPBACK_FAILED             0x00000077
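
/* The loopback result is a bitmask: bits 0-3 carry the MAC-loopback
 * sub-tests and bits 4-7 the PHY-loopback ones, with STD/JMB/TSO at
 * weights 1/2/4 inside each nibble.  0x77 therefore marks every
 * sub-test in both modes as failed.
 */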
11335
11336 static int tg3_test_loopback(struct tg3 *tp)
11337 {
11338         int err = 0;
11339         u32 eee_cap, cpmuctrl = 0;
11340
11341         if (!netif_running(tp->dev))
11342                 return TG3_LOOPBACK_FAILED;
11343
11344         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11345         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11346
11347         err = tg3_reset_hw(tp, 1);
11348         if (err) {
11349                 err = TG3_LOOPBACK_FAILED;
11350                 goto done;
11351         }
11352
11353         if (tg3_flag(tp, ENABLE_RSS)) {
11354                 int i;
11355
11356                 /* Reroute all rx packets to the 1st queue */
11357                 for (i = MAC_RSS_INDIR_TBL_0;
11358                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11359                         tw32(i, 0x0);
11360         }
11361
11362         /* Turn off gphy autopowerdown. */
11363         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11364                 tg3_phy_toggle_apd(tp, false);
11365
11366         if (tg3_flag(tp, CPMU_PRESENT)) {
11367                 int i;
11368                 u32 status;
11369
11370                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11371
11372                 /* Wait for up to 40 microseconds to acquire lock. */
11373                 for (i = 0; i < 4; i++) {
11374                         status = tr32(TG3_CPMU_MUTEX_GNT);
11375                         if (status == CPMU_MUTEX_GNT_DRIVER)
11376                                 break;
11377                         udelay(10);
11378                 }
11379
11380                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11381                         err = TG3_LOOPBACK_FAILED;
11382                         goto done;
11383                 }
11384
11385                 /* Turn off link-based power management. */
11386                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11387                 tw32(TG3_CPMU_CTRL,
11388                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11389                                   CPMU_CTRL_LINK_AWARE_MODE));
11390         }
11391
11392         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11393                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11394
11395         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11396             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11397                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11398
11399         if (tg3_flag(tp, CPMU_PRESENT)) {
11400                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11401
11402                 /* Release the mutex */
11403                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11404         }
11405
11406         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11407             !tg3_flag(tp, USE_PHYLIB)) {
11408                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11409                         err |= TG3_STD_LOOPBACK_FAILED <<
11410                                TG3_PHY_LOOPBACK_SHIFT;
11411                 if (tg3_flag(tp, TSO_CAPABLE) &&
11412                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11413                         err |= TG3_TSO_LOOPBACK_FAILED <<
11414                                TG3_PHY_LOOPBACK_SHIFT;
11415                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11416                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11417                         err |= TG3_JMB_LOOPBACK_FAILED <<
11418                                TG3_PHY_LOOPBACK_SHIFT;
11419         }
11420
11421         /* Re-enable gphy autopowerdown. */
11422         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11423                 tg3_phy_toggle_apd(tp, true);
11424
11425 done:
11426         tp->phy_flags |= eee_cap;
11427
11428         return err;
11429 }
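
/* Gating summary for the passes above: tg3_run_loopback() returns 0 up
 * front for MAC loopback on the 5780 and on CPMU-equipped chips, so
 * those never set failure bits, and the PHY-loopback pass is skipped
 * entirely for SERDES PHYs and phylib-managed devices; the TSO and
 * jumbo sub-tests additionally require TSO_CAPABLE and
 * JUMBO_RING_ENABLE.
 */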
11430
11431 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11432                           u64 *data)
11433 {
11434         struct tg3 *tp = netdev_priv(dev);
11435
11436         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11437                 tg3_power_up(tp);
11438
11439         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11440
11441         if (tg3_test_nvram(tp) != 0) {
11442                 etest->flags |= ETH_TEST_FL_FAILED;
11443                 data[0] = 1;
11444         }
11445         if (tg3_test_link(tp) != 0) {
11446                 etest->flags |= ETH_TEST_FL_FAILED;
11447                 data[1] = 1;
11448         }
11449         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11450                 int err, err2 = 0, irq_sync = 0;
11451
11452                 if (netif_running(dev)) {
11453                         tg3_phy_stop(tp);
11454                         tg3_netif_stop(tp);
11455                         irq_sync = 1;
11456                 }
11457
11458                 tg3_full_lock(tp, irq_sync);
11459
11460                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11461                 err = tg3_nvram_lock(tp);
11462                 tg3_halt_cpu(tp, RX_CPU_BASE);
11463                 if (!tg3_flag(tp, 5705_PLUS))
11464                         tg3_halt_cpu(tp, TX_CPU_BASE);
11465                 if (!err)
11466                         tg3_nvram_unlock(tp);
11467
11468                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11469                         tg3_phy_reset(tp);
11470
11471                 if (tg3_test_registers(tp) != 0) {
11472                         etest->flags |= ETH_TEST_FL_FAILED;
11473                         data[2] = 1;
11474                 }
11475                 if (tg3_test_memory(tp) != 0) {
11476                         etest->flags |= ETH_TEST_FL_FAILED;
11477                         data[3] = 1;
11478                 }
11479                 if ((data[4] = tg3_test_loopback(tp)) != 0)
11480                         etest->flags |= ETH_TEST_FL_FAILED;
11481
11482                 tg3_full_unlock(tp);
11483
11484                 if (tg3_test_interrupt(tp) != 0) {
11485                         etest->flags |= ETH_TEST_FL_FAILED;
11486                         data[5] = 1;
11487                 }
11488
11489                 tg3_full_lock(tp, 0);
11490
11491                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11492                 if (netif_running(dev)) {
11493                         tg3_flag_set(tp, INIT_COMPLETE);
11494                         err2 = tg3_restart_hw(tp, 1);
11495                         if (!err2)
11496                                 tg3_netif_start(tp);
11497                 }
11498
11499                 tg3_full_unlock(tp);
11500
11501                 if (irq_sync && !err2)
11502                         tg3_phy_start(tp);
11503         }
11504         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11505                 tg3_power_down(tp);
11506
11507 }
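
/* The six slots written above line up with ethtool's self-test output:
 * data[0] nvram, data[1] link, data[2] registers, data[3] memory,
 * data[4] loopback and data[5] interrupt.  From userspace the full
 * offline run is typically requested with, e.g.:
 *
 *      ethtool -t eth0 offline
 *
 * ("eth0" is just an example interface name.)
 */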
11508
11509 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11510 {
11511         struct mii_ioctl_data *data = if_mii(ifr);
11512         struct tg3 *tp = netdev_priv(dev);
11513         int err;
11514
11515         if (tg3_flag(tp, USE_PHYLIB)) {
11516                 struct phy_device *phydev;
11517                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11518                         return -EAGAIN;
11519                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11520                 return phy_mii_ioctl(phydev, ifr, cmd);
11521         }
11522
11523         switch (cmd) {
11524         case SIOCGMIIPHY:
11525                 data->phy_id = tp->phy_addr;
11526
11527                 /* fallthru */
11528         case SIOCGMIIREG: {
11529                 u32 mii_regval;
11530
11531                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11532                         break;                  /* We have no PHY */
11533
11534                 if (!netif_running(dev))
11535                         return -EAGAIN;
11536
11537                 spin_lock_bh(&tp->lock);
11538                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11539                 spin_unlock_bh(&tp->lock);
11540
11541                 data->val_out = mii_regval;
11542
11543                 return err;
11544         }
11545
11546         case SIOCSMIIREG:
11547                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11548                         break;                  /* We have no PHY */
11549
11550                 if (!netif_running(dev))
11551                         return -EAGAIN;
11552
11553                 spin_lock_bh(&tp->lock);
11554                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11555                 spin_unlock_bh(&tp->lock);
11556
11557                 return err;
11558
11559         default:
11560                 /* do nothing */
11561                 break;
11562         }
11563         return -EOPNOTSUPP;
11564 }
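
/* A minimal userspace sketch of the MII register read handled above
 * (illustrative only; "eth0" and MII_BMSR are example values, and the
 * ifr_data cast is the conventional way userspace reaches the
 * mii_ioctl_data member of the ifreq union):
 *
 *      struct ifreq ifr = { 0 };
 *      struct mii_ioctl_data *mii =
 *              (struct mii_ioctl_data *)&ifr.ifr_data;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ioctl(fd, SIOCGMIIPHY, &ifr);     // fills mii->phy_id
 *      mii->reg_num = MII_BMSR;          // basic mode status register
 *      ioctl(fd, SIOCGMIIREG, &ifr);     // result lands in mii->val_out
 */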
11565
11566 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11567 {
11568         struct tg3 *tp = netdev_priv(dev);
11569
11570         memcpy(ec, &tp->coal, sizeof(*ec));
11571         return 0;
11572 }
11573
11574 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11575 {
11576         struct tg3 *tp = netdev_priv(dev);
11577         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11578         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11579
11580         if (!tg3_flag(tp, 5705_PLUS)) {
11581                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11582                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11583                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11584                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11585         }
11586
11587         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11588             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11589             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11590             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11591             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11592             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11593             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11594             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11595             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11596             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11597                 return -EINVAL;
11598
11599         /* No rx interrupts will be generated if both are zero */
11600         if ((ec->rx_coalesce_usecs == 0) &&
11601             (ec->rx_max_coalesced_frames == 0))
11602                 return -EINVAL;
11603
11604         /* No tx interrupts will be generated if both are zero */
11605         if ((ec->tx_coalesce_usecs == 0) &&
11606             (ec->tx_max_coalesced_frames == 0))
11607                 return -EINVAL;
11608
11609         /* Only copy relevant parameters, ignore all others. */
11610         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11611         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11612         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11613         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11614         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11615         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11616         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11617         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11618         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11619
11620         if (netif_running(dev)) {
11621                 tg3_full_lock(tp, 0);
11622                 __tg3_set_coalesce(tp, &tp->coal);
11623                 tg3_full_unlock(tp);
11624         }
11625         return 0;
11626 }
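
/* These bounds back the "ethtool -C" interface.  E.g. (example values):
 *
 *      ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * succeeds as long as each value fits the per-chip maxima above and
 * neither direction ends up with both its usecs and frames set to 0,
 * which would silence that direction's interrupts.
 */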
11627
11628 static const struct ethtool_ops tg3_ethtool_ops = {
11629         .get_settings           = tg3_get_settings,
11630         .set_settings           = tg3_set_settings,
11631         .get_drvinfo            = tg3_get_drvinfo,
11632         .get_regs_len           = tg3_get_regs_len,
11633         .get_regs               = tg3_get_regs,
11634         .get_wol                = tg3_get_wol,
11635         .set_wol                = tg3_set_wol,
11636         .get_msglevel           = tg3_get_msglevel,
11637         .set_msglevel           = tg3_set_msglevel,
11638         .nway_reset             = tg3_nway_reset,
11639         .get_link               = ethtool_op_get_link,
11640         .get_eeprom_len         = tg3_get_eeprom_len,
11641         .get_eeprom             = tg3_get_eeprom,
11642         .set_eeprom             = tg3_set_eeprom,
11643         .get_ringparam          = tg3_get_ringparam,
11644         .set_ringparam          = tg3_set_ringparam,
11645         .get_pauseparam         = tg3_get_pauseparam,
11646         .set_pauseparam         = tg3_set_pauseparam,
11647         .self_test              = tg3_self_test,
11648         .get_strings            = tg3_get_strings,
11649         .set_phys_id            = tg3_set_phys_id,
11650         .get_ethtool_stats      = tg3_get_ethtool_stats,
11651         .get_coalesce           = tg3_get_coalesce,
11652         .set_coalesce           = tg3_set_coalesce,
11653         .get_sset_count         = tg3_get_sset_count,
11654 };
11655
11656 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11657 {
11658         u32 cursize, val, magic;
11659
11660         tp->nvram_size = EEPROM_CHIP_SIZE;
11661
11662         if (tg3_nvram_read(tp, 0, &magic) != 0)
11663                 return;
11664
11665         if ((magic != TG3_EEPROM_MAGIC) &&
11666             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11667             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11668                 return;
11669
11670         /*
11671          * Size the chip by reading offsets at increasing powers of two.
11672          * When we encounter our validation signature, we know the addressing
11673          * has wrapped around, and thus have our chip size.
11674          */
11675         cursize = 0x10;
11676
11677         while (cursize < tp->nvram_size) {
11678                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11679                         return;
11680
11681                 if (val == magic)
11682                         break;
11683
11684                 cursize <<= 1;
11685         }
11686
11687         tp->nvram_size = cursize;
11688 }
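
/* Example of the wraparound probe above, assuming the part simply
 * ignores address bits beyond its size: on a 512-byte EEPROM the reads
 * at 0x10, 0x20, ..., 0x100 return ordinary (non-magic) data, but the
 * read at cursize == 0x200 wraps to offset 0 and returns the magic
 * signature, so nvram_size becomes 0x200.
 */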
11689
11690 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11691 {
11692         u32 val;
11693
11694         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11695                 return;
11696
11697         /* Selfboot format */
11698         if (val != TG3_EEPROM_MAGIC) {
11699                 tg3_get_eeprom_size(tp);
11700                 return;
11701         }
11702
11703         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11704                 if (val != 0) {
11705                         /* This is confusing.  We want to operate on the
11706                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11707                          * call will read from NVRAM and byteswap the data
11708                          * according to the byteswapping settings for all
11709                          * other register accesses.  This ensures the data we
11710                          * want will always reside in the lower 16-bits.
11711                          * However, the data in NVRAM is in LE format, which
11712                          * means the data from the NVRAM read will always be
11713                          * opposite the endianness of the CPU.  The 16-bit
11714                          * byteswap then brings the data to CPU endianness.
11715                          */
11716                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11717                         return;
11718                 }
11719         }
11720         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11721 }
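
/* Worked example of the swab above: if the size word at 0xf2 is 0x0080
 * (the size in KB), the raw read presents it as 0x8000 in the low half
 * of val, swab16() restores 0x0080, and nvram_size becomes
 * 0x0080 * 1024 = 128 KB.
 */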
11722
11723 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11724 {
11725         u32 nvcfg1;
11726
11727         nvcfg1 = tr32(NVRAM_CFG1);
11728         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11729                 tg3_flag_set(tp, FLASH);
11730         } else {
11731                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11732                 tw32(NVRAM_CFG1, nvcfg1);
11733         }
11734
11735         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11736             tg3_flag(tp, 5780_CLASS)) {
11737                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11738                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11739                         tp->nvram_jedecnum = JEDEC_ATMEL;
11740                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11741                         tg3_flag_set(tp, NVRAM_BUFFERED);
11742                         break;
11743                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11744                         tp->nvram_jedecnum = JEDEC_ATMEL;
11745                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11746                         break;
11747                 case FLASH_VENDOR_ATMEL_EEPROM:
11748                         tp->nvram_jedecnum = JEDEC_ATMEL;
11749                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11750                         tg3_flag_set(tp, NVRAM_BUFFERED);
11751                         break;
11752                 case FLASH_VENDOR_ST:
11753                         tp->nvram_jedecnum = JEDEC_ST;
11754                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11755                         tg3_flag_set(tp, NVRAM_BUFFERED);
11756                         break;
11757                 case FLASH_VENDOR_SAIFUN:
11758                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11759                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11760                         break;
11761                 case FLASH_VENDOR_SST_SMALL:
11762                 case FLASH_VENDOR_SST_LARGE:
11763                         tp->nvram_jedecnum = JEDEC_SST;
11764                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11765                         break;
11766                 }
11767         } else {
11768                 tp->nvram_jedecnum = JEDEC_ATMEL;
11769                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11770                 tg3_flag_set(tp, NVRAM_BUFFERED);
11771         }
11772 }
11773
11774 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11775 {
11776         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11777         case FLASH_5752PAGE_SIZE_256:
11778                 tp->nvram_pagesize = 256;
11779                 break;
11780         case FLASH_5752PAGE_SIZE_512:
11781                 tp->nvram_pagesize = 512;
11782                 break;
11783         case FLASH_5752PAGE_SIZE_1K:
11784                 tp->nvram_pagesize = 1024;
11785                 break;
11786         case FLASH_5752PAGE_SIZE_2K:
11787                 tp->nvram_pagesize = 2048;
11788                 break;
11789         case FLASH_5752PAGE_SIZE_4K:
11790                 tp->nvram_pagesize = 4096;
11791                 break;
11792         case FLASH_5752PAGE_SIZE_264:
11793                 tp->nvram_pagesize = 264;
11794                 break;
11795         case FLASH_5752PAGE_SIZE_528:
11796                 tp->nvram_pagesize = 528;
11797                 break;
11798         }
11799 }
11800
11801 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11802 {
11803         u32 nvcfg1;
11804
11805         nvcfg1 = tr32(NVRAM_CFG1);
11806
11807         /* NVRAM protection for TPM */
11808         if (nvcfg1 & (1 << 27))
11809                 tg3_flag_set(tp, PROTECTED_NVRAM);
11810
11811         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11812         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11813         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11814                 tp->nvram_jedecnum = JEDEC_ATMEL;
11815                 tg3_flag_set(tp, NVRAM_BUFFERED);
11816                 break;
11817         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11818                 tp->nvram_jedecnum = JEDEC_ATMEL;
11819                 tg3_flag_set(tp, NVRAM_BUFFERED);
11820                 tg3_flag_set(tp, FLASH);
11821                 break;
11822         case FLASH_5752VENDOR_ST_M45PE10:
11823         case FLASH_5752VENDOR_ST_M45PE20:
11824         case FLASH_5752VENDOR_ST_M45PE40:
11825                 tp->nvram_jedecnum = JEDEC_ST;
11826                 tg3_flag_set(tp, NVRAM_BUFFERED);
11827                 tg3_flag_set(tp, FLASH);
11828                 break;
11829         }
11830
11831         if (tg3_flag(tp, FLASH)) {
11832                 tg3_nvram_get_pagesize(tp, nvcfg1);
11833         } else {
11834                 /* For eeprom, set pagesize to maximum eeprom size */
11835                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11836
11837                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11838                 tw32(NVRAM_CFG1, nvcfg1);
11839         }
11840 }
11841
11842 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11843 {
11844         u32 nvcfg1, protect = 0;
11845
11846         nvcfg1 = tr32(NVRAM_CFG1);
11847
11848         /* NVRAM protection for TPM */
11849         if (nvcfg1 & (1 << 27)) {
11850                 tg3_flag_set(tp, PROTECTED_NVRAM);
11851                 protect = 1;
11852         }
11853
11854         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11855         switch (nvcfg1) {
11856         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11857         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11858         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11859         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11860                 tp->nvram_jedecnum = JEDEC_ATMEL;
11861                 tg3_flag_set(tp, NVRAM_BUFFERED);
11862                 tg3_flag_set(tp, FLASH);
11863                 tp->nvram_pagesize = 264;
11864                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11865                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11866                         tp->nvram_size = (protect ? 0x3e200 :
11867                                           TG3_NVRAM_SIZE_512KB);
11868                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11869                         tp->nvram_size = (protect ? 0x1f200 :
11870                                           TG3_NVRAM_SIZE_256KB);
11871                 else
11872                         tp->nvram_size = (protect ? 0x1f200 :
11873                                           TG3_NVRAM_SIZE_128KB);
11874                 break;
11875         case FLASH_5752VENDOR_ST_M45PE10:
11876         case FLASH_5752VENDOR_ST_M45PE20:
11877         case FLASH_5752VENDOR_ST_M45PE40:
11878                 tp->nvram_jedecnum = JEDEC_ST;
11879                 tg3_flag_set(tp, NVRAM_BUFFERED);
11880                 tg3_flag_set(tp, FLASH);
11881                 tp->nvram_pagesize = 256;
11882                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11883                         tp->nvram_size = (protect ?
11884                                           TG3_NVRAM_SIZE_64KB :
11885                                           TG3_NVRAM_SIZE_128KB);
11886                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11887                         tp->nvram_size = (protect ?
11888                                           TG3_NVRAM_SIZE_64KB :
11889                                           TG3_NVRAM_SIZE_256KB);
11890                 else
11891                         tp->nvram_size = (protect ?
11892                                           TG3_NVRAM_SIZE_128KB :
11893                                           TG3_NVRAM_SIZE_512KB);
11894                 break;
11895         }
11896 }
11897
11898 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11899 {
11900         u32 nvcfg1;
11901
11902         nvcfg1 = tr32(NVRAM_CFG1);
11903
11904         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11905         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11906         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11907         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11908         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11909                 tp->nvram_jedecnum = JEDEC_ATMEL;
11910                 tg3_flag_set(tp, NVRAM_BUFFERED);
11911                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11912
11913                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11914                 tw32(NVRAM_CFG1, nvcfg1);
11915                 break;
11916         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11917         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11918         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11919         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11920                 tp->nvram_jedecnum = JEDEC_ATMEL;
11921                 tg3_flag_set(tp, NVRAM_BUFFERED);
11922                 tg3_flag_set(tp, FLASH);
11923                 tp->nvram_pagesize = 264;
11924                 break;
11925         case FLASH_5752VENDOR_ST_M45PE10:
11926         case FLASH_5752VENDOR_ST_M45PE20:
11927         case FLASH_5752VENDOR_ST_M45PE40:
11928                 tp->nvram_jedecnum = JEDEC_ST;
11929                 tg3_flag_set(tp, NVRAM_BUFFERED);
11930                 tg3_flag_set(tp, FLASH);
11931                 tp->nvram_pagesize = 256;
11932                 break;
11933         }
11934 }
11935
11936 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11937 {
11938         u32 nvcfg1, protect = 0;
11939
11940         nvcfg1 = tr32(NVRAM_CFG1);
11941
11942         /* NVRAM protection for TPM */
11943         if (nvcfg1 & (1 << 27)) {
11944                 tg3_flag_set(tp, PROTECTED_NVRAM);
11945                 protect = 1;
11946         }
11947
11948         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11949         switch (nvcfg1) {
11950         case FLASH_5761VENDOR_ATMEL_ADB021D:
11951         case FLASH_5761VENDOR_ATMEL_ADB041D:
11952         case FLASH_5761VENDOR_ATMEL_ADB081D:
11953         case FLASH_5761VENDOR_ATMEL_ADB161D:
11954         case FLASH_5761VENDOR_ATMEL_MDB021D:
11955         case FLASH_5761VENDOR_ATMEL_MDB041D:
11956         case FLASH_5761VENDOR_ATMEL_MDB081D:
11957         case FLASH_5761VENDOR_ATMEL_MDB161D:
11958                 tp->nvram_jedecnum = JEDEC_ATMEL;
11959                 tg3_flag_set(tp, NVRAM_BUFFERED);
11960                 tg3_flag_set(tp, FLASH);
11961                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11962                 tp->nvram_pagesize = 256;
11963                 break;
11964         case FLASH_5761VENDOR_ST_A_M45PE20:
11965         case FLASH_5761VENDOR_ST_A_M45PE40:
11966         case FLASH_5761VENDOR_ST_A_M45PE80:
11967         case FLASH_5761VENDOR_ST_A_M45PE16:
11968         case FLASH_5761VENDOR_ST_M_M45PE20:
11969         case FLASH_5761VENDOR_ST_M_M45PE40:
11970         case FLASH_5761VENDOR_ST_M_M45PE80:
11971         case FLASH_5761VENDOR_ST_M_M45PE16:
11972                 tp->nvram_jedecnum = JEDEC_ST;
11973                 tg3_flag_set(tp, NVRAM_BUFFERED);
11974                 tg3_flag_set(tp, FLASH);
11975                 tp->nvram_pagesize = 256;
11976                 break;
11977         }
11978
11979         if (protect) {
11980                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11981         } else {
11982                 switch (nvcfg1) {
11983                 case FLASH_5761VENDOR_ATMEL_ADB161D:
11984                 case FLASH_5761VENDOR_ATMEL_MDB161D:
11985                 case FLASH_5761VENDOR_ST_A_M45PE16:
11986                 case FLASH_5761VENDOR_ST_M_M45PE16:
11987                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11988                         break;
11989                 case FLASH_5761VENDOR_ATMEL_ADB081D:
11990                 case FLASH_5761VENDOR_ATMEL_MDB081D:
11991                 case FLASH_5761VENDOR_ST_A_M45PE80:
11992                 case FLASH_5761VENDOR_ST_M_M45PE80:
11993                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11994                         break;
11995                 case FLASH_5761VENDOR_ATMEL_ADB041D:
11996                 case FLASH_5761VENDOR_ATMEL_MDB041D:
11997                 case FLASH_5761VENDOR_ST_A_M45PE40:
11998                 case FLASH_5761VENDOR_ST_M_M45PE40:
11999                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12000                         break;
12001                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12002                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12003                 case FLASH_5761VENDOR_ST_A_M45PE20:
12004                 case FLASH_5761VENDOR_ST_M_M45PE20:
12005                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12006                         break;
12007                 }
12008         }
12009 }
12010
12011 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12012 {
12013         tp->nvram_jedecnum = JEDEC_ATMEL;
12014         tg3_flag_set(tp, NVRAM_BUFFERED);
12015         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12016 }
12017
12018 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12019 {
12020         u32 nvcfg1;
12021
12022         nvcfg1 = tr32(NVRAM_CFG1);
12023
12024         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12025         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12026         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12027                 tp->nvram_jedecnum = JEDEC_ATMEL;
12028                 tg3_flag_set(tp, NVRAM_BUFFERED);
12029                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12030
12031                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12032                 tw32(NVRAM_CFG1, nvcfg1);
12033                 return;
12034         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12035         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12036         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12037         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12038         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12039         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12040         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12041                 tp->nvram_jedecnum = JEDEC_ATMEL;
12042                 tg3_flag_set(tp, NVRAM_BUFFERED);
12043                 tg3_flag_set(tp, FLASH);
12044
12045                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12046                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12047                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12048                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12049                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12050                         break;
12051                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12052                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12053                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12054                         break;
12055                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12056                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12057                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12058                         break;
12059                 }
12060                 break;
12061         case FLASH_5752VENDOR_ST_M45PE10:
12062         case FLASH_5752VENDOR_ST_M45PE20:
12063         case FLASH_5752VENDOR_ST_M45PE40:
12064                 tp->nvram_jedecnum = JEDEC_ST;
12065                 tg3_flag_set(tp, NVRAM_BUFFERED);
12066                 tg3_flag_set(tp, FLASH);
12067
12068                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12069                 case FLASH_5752VENDOR_ST_M45PE10:
12070                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12071                         break;
12072                 case FLASH_5752VENDOR_ST_M45PE20:
12073                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12074                         break;
12075                 case FLASH_5752VENDOR_ST_M45PE40:
12076                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12077                         break;
12078                 }
12079                 break;
12080         default:
12081                 tg3_flag_set(tp, NO_NVRAM);
12082                 return;
12083         }
12084
12085         tg3_nvram_get_pagesize(tp, nvcfg1);
12086         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12087                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12088 }
12089
12090
12091 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12092 {
12093         u32 nvcfg1;
12094
12095         nvcfg1 = tr32(NVRAM_CFG1);
12096
12097         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12098         case FLASH_5717VENDOR_ATMEL_EEPROM:
12099         case FLASH_5717VENDOR_MICRO_EEPROM:
12100                 tp->nvram_jedecnum = JEDEC_ATMEL;
12101                 tg3_flag_set(tp, NVRAM_BUFFERED);
12102                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12103
12104                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12105                 tw32(NVRAM_CFG1, nvcfg1);
12106                 return;
12107         case FLASH_5717VENDOR_ATMEL_MDB011D:
12108         case FLASH_5717VENDOR_ATMEL_ADB011B:
12109         case FLASH_5717VENDOR_ATMEL_ADB011D:
12110         case FLASH_5717VENDOR_ATMEL_MDB021D:
12111         case FLASH_5717VENDOR_ATMEL_ADB021B:
12112         case FLASH_5717VENDOR_ATMEL_ADB021D:
12113         case FLASH_5717VENDOR_ATMEL_45USPT:
12114                 tp->nvram_jedecnum = JEDEC_ATMEL;
12115                 tg3_flag_set(tp, NVRAM_BUFFERED);
12116                 tg3_flag_set(tp, FLASH);
12117
12118                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12119                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12120                         /* Detect size with tg3_get_nvram_size() */
12121                         break;
12122                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12123                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12124                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12125                         break;
12126                 default:
12127                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12128                         break;
12129                 }
12130                 break;
12131         case FLASH_5717VENDOR_ST_M_M25PE10:
12132         case FLASH_5717VENDOR_ST_A_M25PE10:
12133         case FLASH_5717VENDOR_ST_M_M45PE10:
12134         case FLASH_5717VENDOR_ST_A_M45PE10:
12135         case FLASH_5717VENDOR_ST_M_M25PE20:
12136         case FLASH_5717VENDOR_ST_A_M25PE20:
12137         case FLASH_5717VENDOR_ST_M_M45PE20:
12138         case FLASH_5717VENDOR_ST_A_M45PE20:
12139         case FLASH_5717VENDOR_ST_25USPT:
12140         case FLASH_5717VENDOR_ST_45USPT:
12141                 tp->nvram_jedecnum = JEDEC_ST;
12142                 tg3_flag_set(tp, NVRAM_BUFFERED);
12143                 tg3_flag_set(tp, FLASH);
12144
12145                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12146                 case FLASH_5717VENDOR_ST_M_M25PE20:
12147                 case FLASH_5717VENDOR_ST_M_M45PE20:
12148                         /* Detect size with tg3_get_nvram_size() */
12149                         break;
12150                 case FLASH_5717VENDOR_ST_A_M25PE20:
12151                 case FLASH_5717VENDOR_ST_A_M45PE20:
12152                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12153                         break;
12154                 default:
12155                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12156                         break;
12157                 }
12158                 break;
12159         default:
12160                 tg3_flag_set(tp, NO_NVRAM);
12161                 return;
12162         }
12163
12164         tg3_nvram_get_pagesize(tp, nvcfg1);
12165         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12166                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12167 }
12168
12169 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12170 {
12171         u32 nvcfg1, nvmpinstrp;
12172
12173         nvcfg1 = tr32(NVRAM_CFG1);
12174         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12175
12176         switch (nvmpinstrp) {
12177         case FLASH_5720_EEPROM_HD:
12178         case FLASH_5720_EEPROM_LD:
12179                 tp->nvram_jedecnum = JEDEC_ATMEL;
12180                 tg3_flag_set(tp, NVRAM_BUFFERED);
12181
12182                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12183                 tw32(NVRAM_CFG1, nvcfg1);
12184                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12185                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12186                 else
12187                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12188                 return;
12189         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12190         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12191         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12192         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12193         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12194         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12195         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12196         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12197         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12198         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12199         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12200         case FLASH_5720VENDOR_ATMEL_45USPT:
12201                 tp->nvram_jedecnum = JEDEC_ATMEL;
12202                 tg3_flag_set(tp, NVRAM_BUFFERED);
12203                 tg3_flag_set(tp, FLASH);
12204
12205                 switch (nvmpinstrp) {
12206                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12207                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12208                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12209                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12210                         break;
12211                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12212                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12213                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12214                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12215                         break;
12216                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12217                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12218                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12219                         break;
12220                 default:
12221                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12222                         break;
12223                 }
12224                 break;
12225         case FLASH_5720VENDOR_M_ST_M25PE10:
12226         case FLASH_5720VENDOR_M_ST_M45PE10:
12227         case FLASH_5720VENDOR_A_ST_M25PE10:
12228         case FLASH_5720VENDOR_A_ST_M45PE10:
12229         case FLASH_5720VENDOR_M_ST_M25PE20:
12230         case FLASH_5720VENDOR_M_ST_M45PE20:
12231         case FLASH_5720VENDOR_A_ST_M25PE20:
12232         case FLASH_5720VENDOR_A_ST_M45PE20:
12233         case FLASH_5720VENDOR_M_ST_M25PE40:
12234         case FLASH_5720VENDOR_M_ST_M45PE40:
12235         case FLASH_5720VENDOR_A_ST_M25PE40:
12236         case FLASH_5720VENDOR_A_ST_M45PE40:
12237         case FLASH_5720VENDOR_M_ST_M25PE80:
12238         case FLASH_5720VENDOR_M_ST_M45PE80:
12239         case FLASH_5720VENDOR_A_ST_M25PE80:
12240         case FLASH_5720VENDOR_A_ST_M45PE80:
12241         case FLASH_5720VENDOR_ST_25USPT:
12242         case FLASH_5720VENDOR_ST_45USPT:
12243                 tp->nvram_jedecnum = JEDEC_ST;
12244                 tg3_flag_set(tp, NVRAM_BUFFERED);
12245                 tg3_flag_set(tp, FLASH);
12246
12247                 switch (nvmpinstrp) {
12248                 case FLASH_5720VENDOR_M_ST_M25PE20:
12249                 case FLASH_5720VENDOR_M_ST_M45PE20:
12250                 case FLASH_5720VENDOR_A_ST_M25PE20:
12251                 case FLASH_5720VENDOR_A_ST_M45PE20:
12252                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12253                         break;
12254                 case FLASH_5720VENDOR_M_ST_M25PE40:
12255                 case FLASH_5720VENDOR_M_ST_M45PE40:
12256                 case FLASH_5720VENDOR_A_ST_M25PE40:
12257                 case FLASH_5720VENDOR_A_ST_M45PE40:
12258                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12259                         break;
12260                 case FLASH_5720VENDOR_M_ST_M25PE80:
12261                 case FLASH_5720VENDOR_M_ST_M45PE80:
12262                 case FLASH_5720VENDOR_A_ST_M25PE80:
12263                 case FLASH_5720VENDOR_A_ST_M45PE80:
12264                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12265                         break;
12266                 default:
12267                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12268                         break;
12269                 }
12270                 break;
12271         default:
12272                 tg3_flag_set(tp, NO_NVRAM);
12273                 return;
12274         }
12275
12276         tg3_nvram_get_pagesize(tp, nvcfg1);
12277         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12278                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12279 }
12280
12281 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12282 static void __devinit tg3_nvram_init(struct tg3 *tp)
12283 {
12284         tw32_f(GRC_EEPROM_ADDR,
12285              (EEPROM_ADDR_FSM_RESET |
12286               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12287                EEPROM_ADDR_CLKPERD_SHIFT)));
12288
12289         msleep(1);
12290
12291         /* Enable seeprom accesses. */
12292         tw32_f(GRC_LOCAL_CTRL,
12293              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12294         udelay(100);
12295
12296         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12297             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12298                 tg3_flag_set(tp, NVRAM);
12299
12300                 if (tg3_nvram_lock(tp)) {
12301                         netdev_warn(tp->dev,
12302                                     "Cannot get nvram lock, %s failed\n",
12303                                     __func__);
12304                         return;
12305                 }
12306                 tg3_enable_nvram_access(tp);
12307
12308                 tp->nvram_size = 0;
12309
12310                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12311                         tg3_get_5752_nvram_info(tp);
12312                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12313                         tg3_get_5755_nvram_info(tp);
12314                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12315                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12316                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12317                         tg3_get_5787_nvram_info(tp);
12318                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12319                         tg3_get_5761_nvram_info(tp);
12320                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12321                         tg3_get_5906_nvram_info(tp);
12322                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12323                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12324                         tg3_get_57780_nvram_info(tp);
12325                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12326                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12327                         tg3_get_5717_nvram_info(tp);
12328                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12329                         tg3_get_5720_nvram_info(tp);
12330                 else
12331                         tg3_get_nvram_info(tp);
12332
12333                 if (tp->nvram_size == 0)
12334                         tg3_get_nvram_size(tp);
12335
12336                 tg3_disable_nvram_access(tp);
12337                 tg3_nvram_unlock(tp);
12338
12339         } else {
12340                 tg3_flag_clear(tp, NVRAM);
12341                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12342
12343                 tg3_get_eeprom_size(tp);
12344         }
12345 }
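
/* Detection order in tg3_nvram_init(): reset the EEPROM FSM, enable
 * auto-SEEPROM access, then (except on 5700/5701, which only have the
 * legacy EEPROM interface) take the NVRAM lock and run the per-ASIC
 * *_nvram_info() decoder; any decoder that leaves nvram_size at 0
 * defers the sizing to tg3_get_nvram_size().
 */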
12346
12347 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12348                                     u32 offset, u32 len, u8 *buf)
12349 {
12350         int i, j, rc = 0;
12351         u32 val;
12352
12353         for (i = 0; i < len; i += 4) {
12354                 u32 addr;
12355                 __be32 data;
12356
12357                 addr = offset + i;
12358
12359                 memcpy(&data, buf + i, 4);
12360
12361                 /*
12362                  * The SEEPROM interface expects the data to always be opposite
12363                  * the native endian format.  We accomplish this by reversing
12364                  * all the operations that would have been performed on the
12365                  * data from a call to tg3_nvram_read_be32().
12366                  */
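                /* E.g. with buf bytes { 0xaa, 0xbb, 0xcc, 0xdd }:
                 * be32_to_cpu() yields 0xaabbccdd and swab32() hands
                 * the SEEPROM 0xddccbbaa, the mirror image it expects.
                 */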
12367                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12368
12369                 val = tr32(GRC_EEPROM_ADDR);
12370                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12371
12372                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12373                         EEPROM_ADDR_READ);
12374                 tw32(GRC_EEPROM_ADDR, val |
12375                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12376                         (addr & EEPROM_ADDR_ADDR_MASK) |
12377                         EEPROM_ADDR_START |
12378                         EEPROM_ADDR_WRITE);
12379
12380                 for (j = 0; j < 1000; j++) {
12381                         val = tr32(GRC_EEPROM_ADDR);
12382
12383                         if (val & EEPROM_ADDR_COMPLETE)
12384                                 break;
12385                         msleep(1);
12386                 }
12387                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12388                         rc = -EBUSY;
12389                         break;
12390                 }
12391         }
12392
12393         return rc;
12394 }
12395
12396 /* offset and length are dword aligned */
12397 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12398                 u8 *buf)
12399 {
12400         int ret = 0;
12401         u32 pagesize = tp->nvram_pagesize;
12402         u32 pagemask = pagesize - 1;
12403         u32 nvram_cmd;
12404         u8 *tmp;
12405
12406         tmp = kmalloc(pagesize, GFP_KERNEL);
12407         if (tmp == NULL)
12408                 return -ENOMEM;
12409
12410         while (len) {
12411                 int j;
12412                 u32 phy_addr, page_off, size;
12413
12414                 phy_addr = offset & ~pagemask;
12415
12416                 for (j = 0; j < pagesize; j += 4) {
12417                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12418                                                   (__be32 *) (tmp + j));
12419                         if (ret)
12420                                 break;
12421                 }
12422                 if (ret)
12423                         break;
12424
12425                 page_off = offset & pagemask;
12426                 size = pagesize;
12427                 if (len < size)
12428                         size = len;
12429
12430                 len -= size;
12431
12432                 memcpy(tmp + page_off, buf, size);
12433
12434                 offset = offset + (pagesize - page_off);
12435
12436                 tg3_enable_nvram_access(tp);
12437
12438                 /*
12439                  * Before we can erase the flash page, we need
12440                  * to issue a special "write enable" command.
12441                  */
12442                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12443
12444                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12445                         break;
12446
12447                 /* Erase the target page */
12448                 tw32(NVRAM_ADDR, phy_addr);
12449
12450                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12451                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12452
12453                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12454                         break;
12455
12456                 /* Issue another write enable to start the write. */
12457                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12458
12459                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12460                         break;
12461
12462                 for (j = 0; j < pagesize; j += 4) {
12463                         __be32 data;
12464
12465                         data = *((__be32 *) (tmp + j));
12466
12467                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12468
12469                         tw32(NVRAM_ADDR, phy_addr + j);
12470
12471                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12472                                 NVRAM_CMD_WR;
12473
12474                         if (j == 0)
12475                                 nvram_cmd |= NVRAM_CMD_FIRST;
12476                         else if (j == (pagesize - 4))
12477                                 nvram_cmd |= NVRAM_CMD_LAST;
12478
12479                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12480                                 break;
12481                 }
12482                 if (ret)
12483                         break;
12484         }
12485
12486         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12487         tg3_nvram_exec_cmd(tp, nvram_cmd);
12488
12489         kfree(tmp);
12490
12491         return ret;
12492 }
12493
12494 /* offset and length are dword aligned */
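/*
 * Buffered parts accept streamed word writes: NVRAM_CMD_FIRST marks the
 * first word of a page (or of the whole transfer) and NVRAM_CMD_LAST the
 * last word of a page (or of the block), so a write spanning a page
 * boundary is broken into per-page bursts.
 */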
12495 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12496                 u8 *buf)
12497 {
12498         int i, ret = 0;
12499
12500         for (i = 0; i < len; i += 4, offset += 4) {
12501                 u32 page_off, phy_addr, nvram_cmd;
12502                 __be32 data;
12503
12504                 memcpy(&data, buf + i, 4);
12505                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12506
12507                 page_off = offset % tp->nvram_pagesize;
12508
12509                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12510
12511                 tw32(NVRAM_ADDR, phy_addr);
12512
12513                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12514
12515                 if (page_off == 0 || i == 0)
12516                         nvram_cmd |= NVRAM_CMD_FIRST;
12517                 if (page_off == (tp->nvram_pagesize - 4))
12518                         nvram_cmd |= NVRAM_CMD_LAST;
12519
12520                 if (i == (len - 4))
12521                         nvram_cmd |= NVRAM_CMD_LAST;
12522
12523                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12524                     !tg3_flag(tp, 5755_PLUS) &&
12525                     (tp->nvram_jedecnum == JEDEC_ST) &&
12526                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12527
12528                         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
12529                                                      NVRAM_CMD_GO |
12530                                                      NVRAM_CMD_DONE);
12531                         if (ret)
12532                                 break;
12533                 }
12534                 if (!tg3_flag(tp, FLASH)) {
12535                         /* We always do complete word writes to eeprom. */
12536                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12537                 }
12538
12539                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12540                         break;
12541         }
12542         return ret;
12543 }
12544
12545 /* offset and length are dword aligned */
12546 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12547 {
12548         int ret;
12549
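        /* When the part is write protected, GPIO output 1 (which, on
         * these boards, appears to gate the write-protect pin) is
         * driven low for the duration of the write and restored after.
         */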
12550         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12551                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12552                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12553                 udelay(40);
12554         }
12555
12556         if (!tg3_flag(tp, NVRAM)) {
12557                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12558         } else {
12559                 u32 grc_mode;
12560
12561                 ret = tg3_nvram_lock(tp);
12562                 if (ret)
12563                         return ret;
12564
12565                 tg3_enable_nvram_access(tp);
12566                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12567                         tw32(NVRAM_WRITE1, 0x406);
12568
12569                 grc_mode = tr32(GRC_MODE);
12570                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12571
12572                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12573                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
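                /* Buffered flash and plain EEPROM parts can be written
                 * word-at-a-time; unbuffered flash needs the page
                 * read-modify-write path.
                 */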
12574                                 buf);
12575                 } else {
12576                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12577                                 buf);
12578                 }
12579
12580                 grc_mode = tr32(GRC_MODE);
12581                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12582
12583                 tg3_disable_nvram_access(tp);
12584                 tg3_nvram_unlock(tp);
12585         }
12586
12587         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12588                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12589                 udelay(40);
12590         }
12591
12592         return ret;
12593 }
12594
12595 struct subsys_tbl_ent {
12596         u16 subsys_vendor, subsys_devid;
12597         u32 phy_id;
12598 };
12599
12600 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12601         /* Broadcom boards. */
12602         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12603           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12604         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12605           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12606         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12607           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12608         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12609           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12610         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12611           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12612         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12613           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12614         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12615           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12616         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12617           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12618         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12619           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12620         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12621           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12622         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12623           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12624
12625         /* 3com boards. */
12626         { TG3PCI_SUBVENDOR_ID_3COM,
12627           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12628         { TG3PCI_SUBVENDOR_ID_3COM,
12629           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12630         { TG3PCI_SUBVENDOR_ID_3COM,
12631           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12632         { TG3PCI_SUBVENDOR_ID_3COM,
12633           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12634         { TG3PCI_SUBVENDOR_ID_3COM,
12635           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12636
12637         /* DELL boards. */
12638         { TG3PCI_SUBVENDOR_ID_DELL,
12639           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12640         { TG3PCI_SUBVENDOR_ID_DELL,
12641           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12642         { TG3PCI_SUBVENDOR_ID_DELL,
12643           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12644         { TG3PCI_SUBVENDOR_ID_DELL,
12645           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12646
12647         /* Compaq boards. */
12648         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12649           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12650         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12651           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12652         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12653           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12654         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12655           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12656         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12657           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12658
12659         /* IBM boards. */
12660         { TG3PCI_SUBVENDOR_ID_IBM,
12661           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12662 };
12663
12664 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12665 {
12666         int i;
12667
12668         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12669                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12670                      tp->pdev->subsystem_vendor) &&
12671                     (subsys_id_to_phy_id[i].subsys_devid ==
12672                      tp->pdev->subsystem_device))
12673                         return &subsys_id_to_phy_id[i];
12674         }
12675         return NULL;
12676 }
12677
12678 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12679 {
12680         u32 val;
12681         u16 pmcsr;
12682
12683         /* On some early chips the SRAM cannot be accessed in D3hot state,
12684          * so we need to make sure we're in D0.
12685          */
12686         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12687         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12688         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12689         msleep(1);
12690
12691         /* Make sure register accesses (indirect or otherwise)
12692          * will function correctly.
12693          */
12694         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12695                                tp->misc_host_ctrl);
12696
12697         /* The memory arbiter has to be enabled in order for SRAM accesses
12698          * to succeed.  Normally on powerup the tg3 chip firmware will make
12699          * sure it is enabled, but other entities such as system netboot
12700          * code might disable it.
12701          */
12702         val = tr32(MEMARB_MODE);
12703         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12704
12705         tp->phy_id = TG3_PHY_ID_INVALID;
12706         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12707
12708         /* Assume an onboard device and WOL capability by default.  */
12709         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12710         tg3_flag_set(tp, WOL_CAP);
12711
12712         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12713                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12714                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12715                         tg3_flag_set(tp, IS_NIC);
12716                 }
12717                 val = tr32(VCPU_CFGSHDW);
12718                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12719                         tg3_flag_set(tp, ASPM_WORKAROUND);
12720                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12721                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12722                         tg3_flag_set(tp, WOL_ENABLE);
12723                         device_set_wakeup_enable(&tp->pdev->dev, true);
12724                 }
12725                 goto done;
12726         }
12727
12728         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12729         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12730                 u32 nic_cfg, led_cfg;
12731                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12732                 int eeprom_phy_serdes = 0;
12733
12734                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12735                 tp->nic_sram_data_cfg = nic_cfg;
12736
12737                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12738                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12739                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12740                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12741                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12742                     (ver > 0) && (ver < 0x100))
12743                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12744
12745                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12746                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12747
12748                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12749                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12750                         eeprom_phy_serdes = 1;
12751
12752                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12753                 if (nic_phy_id != 0) {
12754                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12755                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12756
12757                         eeprom_phy_id  = (id1 >> 16) << 10;
12758                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12759                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12760                 } else
12761                         eeprom_phy_id = 0;
12762
12763                 tp->phy_id = eeprom_phy_id;
12764                 if (eeprom_phy_serdes) {
12765                         if (!tg3_flag(tp, 5705_PLUS))
12766                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12767                         else
12768                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12769                 }
12770
12771                 if (tg3_flag(tp, 5750_PLUS))
12772                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12773                                     SHASTA_EXT_LED_MODE_MASK);
12774                 else
12775                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12776
12777                 switch (led_cfg) {
12778                 default:
12779                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12780                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12781                         break;
12782
12783                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12784                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12785                         break;
12786
12787                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12788                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12789
12790                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12791                          * read on some older 5700/5701 bootcode.
12792                          */
12793                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12794                             ASIC_REV_5700 ||
12795                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12796                             ASIC_REV_5701)
12797                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12798
12799                         break;
12800
12801                 case SHASTA_EXT_LED_SHARED:
12802                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12803                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12804                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12805                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12806                                                  LED_CTRL_MODE_PHY_2);
12807                         break;
12808
12809                 case SHASTA_EXT_LED_MAC:
12810                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12811                         break;
12812
12813                 case SHASTA_EXT_LED_COMBO:
12814                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12815                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12816                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12817                                                  LED_CTRL_MODE_PHY_2);
12818                         break;
12819
12820                 }
12821
12822                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12823                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12824                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12825                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12826
12827                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12828                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12829
12830                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12831                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12832                         if ((tp->pdev->subsystem_vendor ==
12833                              PCI_VENDOR_ID_ARIMA) &&
12834                             (tp->pdev->subsystem_device == 0x205a ||
12835                              tp->pdev->subsystem_device == 0x2063))
12836                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12837                 } else {
12838                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12839                         tg3_flag_set(tp, IS_NIC);
12840                 }
12841
12842                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12843                         tg3_flag_set(tp, ENABLE_ASF);
12844                         if (tg3_flag(tp, 5750_PLUS))
12845                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12846                 }
12847
12848                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12849                     tg3_flag(tp, 5750_PLUS))
12850                         tg3_flag_set(tp, ENABLE_APE);
12851
12852                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12853                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12854                         tg3_flag_clear(tp, WOL_CAP);
12855
12856                 if (tg3_flag(tp, WOL_CAP) &&
12857                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12858                         tg3_flag_set(tp, WOL_ENABLE);
12859                         device_set_wakeup_enable(&tp->pdev->dev, true);
12860                 }
12861
12862                 if (cfg2 & (1 << 17))
12863                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12864
12865                 /* SerDes signal pre-emphasis in register 0x590 is
12866                  * set by the bootcode if bit 18 is set. */
12867                 if (cfg2 & (1 << 18))
12868                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12869
12870                 if ((tg3_flag(tp, 57765_PLUS) ||
12871                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12872                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12873                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12874                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12875
12876                 if (tg3_flag(tp, PCI_EXPRESS) &&
12877                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12878                     !tg3_flag(tp, 57765_PLUS)) {
12879                         u32 cfg3;
12880
12881                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12882                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12883                                 tg3_flag_set(tp, ASPM_WORKAROUND);
12884                 }
12885
12886                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12887                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12888                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12889                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12890                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12891                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12892         }
12893 done:
12894         if (tg3_flag(tp, WOL_CAP))
12895                 device_set_wakeup_enable(&tp->pdev->dev,
12896                                          tg3_flag(tp, WOL_ENABLE));
12897         else
12898                 device_set_wakeup_capable(&tp->pdev->dev, false);
12899 }
12900
12901 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12902 {
12903         int i;
12904         u32 val;
12905
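        /* Latch the command by pulsing the START bit: write the command
         * with START set, then rewrite it with START clear.
         */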
12906         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12907         tw32(OTP_CTRL, cmd);
12908
12909         /* Wait for up to 1 ms for command to execute. */
12910         for (i = 0; i < 100; i++) {
12911                 val = tr32(OTP_STATUS);
12912                 if (val & OTP_STATUS_CMD_DONE)
12913                         break;
12914                 udelay(10);
12915         }
12916
12917         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12918 }
12919
12920 /* Read the gphy configuration from the OTP region of the chip.  The gphy
12921  * configuration is a 32-bit value that straddles the alignment boundary.
12922  * We do two 32-bit reads and then shift and merge the results.
12923  */
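/*
 * For example (illustrative values): if the first read returns
 * 0xAAAA1234 (top half) and the second returns 0x5678BBBB (bottom
 * half), the merged value is
 * ((0xAAAA1234 & 0xffff) << 16) | (0x5678BBBB >> 16) = 0x12345678.
 */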
12924 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12925 {
12926         u32 bhalf_otp, thalf_otp;
12927
12928         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12929
12930         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12931                 return 0;
12932
12933         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12934
12935         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12936                 return 0;
12937
12938         thalf_otp = tr32(OTP_READ_DATA);
12939
12940         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12941
12942         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12943                 return 0;
12944
12945         bhalf_otp = tr32(OTP_READ_DATA);
12946
12947         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12948 }
12949
12950 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12951 {
12952         u32 adv = ADVERTISED_Autoneg |
12953                   ADVERTISED_Pause;
12954
12955         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12956                 adv |= ADVERTISED_1000baseT_Half |
12957                        ADVERTISED_1000baseT_Full;
12958
12959         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12960                 adv |= ADVERTISED_100baseT_Half |
12961                        ADVERTISED_100baseT_Full |
12962                        ADVERTISED_10baseT_Half |
12963                        ADVERTISED_10baseT_Full |
12964                        ADVERTISED_TP;
12965         else
12966                 adv |= ADVERTISED_FIBRE;
12967
12968         tp->link_config.advertising = adv;
12969         tp->link_config.speed = SPEED_INVALID;
12970         tp->link_config.duplex = DUPLEX_INVALID;
12971         tp->link_config.autoneg = AUTONEG_ENABLE;
12972         tp->link_config.active_speed = SPEED_INVALID;
12973         tp->link_config.active_duplex = DUPLEX_INVALID;
12974         tp->link_config.orig_speed = SPEED_INVALID;
12975         tp->link_config.orig_duplex = DUPLEX_INVALID;
12976         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12977 }
12978
12979 static int __devinit tg3_phy_probe(struct tg3 *tp)
12980 {
12981         u32 hw_phy_id_1, hw_phy_id_2;
12982         u32 hw_phy_id, hw_phy_id_masked;
12983         int err;
12984
12985         /* flow control autonegotiation is default behavior */
12986         tg3_flag_set(tp, PAUSE_AUTONEG);
12987         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12988
12989         if (tg3_flag(tp, USE_PHYLIB))
12990                 return tg3_phy_init(tp);
12991
12992         /* Reading the PHY ID register can conflict with ASF
12993          * firmware access to the PHY hardware.
12994          */
12995         err = 0;
12996         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12997                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12998         } else {
12999                 /* Now read the physical PHY_ID from the chip and verify
13000                  * that it is sane.  If it doesn't look good, we fall back
13001                  * to the PHY_ID found in the eeprom area, and failing
13002                  * that, the hard-coded subsystem device table.
13003                  */
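                /* Pack PHYSID1/PHYSID2 into the driver's internal 32-bit
                 * phy_id layout, the same layout built from the NIC SRAM
                 * data in tg3_get_eeprom_hw_cfg().
                 */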
13004                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13005                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13006
13007                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13008                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13009                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13010
13011                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13012         }
13013
13014         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13015                 tp->phy_id = hw_phy_id;
13016                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13017                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13018                 else
13019                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13020         } else {
13021                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13022                         /* Do nothing, phy ID already set up in
13023                          * tg3_get_eeprom_hw_cfg().
13024                          */
13025                 } else {
13026                         struct subsys_tbl_ent *p;
13027
13028                         /* No eeprom signature?  Try the hardcoded
13029                          * subsys device table.
13030                          */
13031                         p = tg3_lookup_by_subsys(tp);
13032                         if (!p)
13033                                 return -ENODEV;
13034
13035                         tp->phy_id = p->phy_id;
13036                         if (!tp->phy_id ||
13037                             tp->phy_id == TG3_PHY_ID_BCM8002)
13038                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13039                 }
13040         }
13041
13042         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13043             ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13044               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13045              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13046               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13047                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13048
13049         tg3_phy_init_link_config(tp);
13050
13051         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13052             !tg3_flag(tp, ENABLE_APE) &&
13053             !tg3_flag(tp, ENABLE_ASF)) {
13054                 u32 bmsr, mask;
13055
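                /* BMSR latches link-status changes; read it twice so the
                 * second read reflects the current link state.
                 */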
13056                 tg3_readphy(tp, MII_BMSR, &bmsr);
13057                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13058                     (bmsr & BMSR_LSTATUS))
13059                         goto skip_phy_reset;
13060
13061                 err = tg3_phy_reset(tp);
13062                 if (err)
13063                         return err;
13064
13065                 tg3_phy_set_wirespeed(tp);
13066
13067                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13068                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13069                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13070                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13071                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13072                                             tp->link_config.flowctrl);
13073
13074                         tg3_writephy(tp, MII_BMCR,
13075                                      BMCR_ANENABLE | BMCR_ANRESTART);
13076                 }
13077         }
13078
13079 skip_phy_reset:
13080         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13081                 err = tg3_init_5401phy_dsp(tp);
13082                 if (err)
13083                         return err;
13084
13085                 err = tg3_init_5401phy_dsp(tp);
13086         }
13087
13088         return err;
13089 }
13090
13091 static void __devinit tg3_read_vpd(struct tg3 *tp)
13092 {
13093         u8 *vpd_data;
13094         unsigned int block_end, rosize, len;
13095         int j, i = 0;
13096
13097         vpd_data = (u8 *)tg3_vpd_readblock(tp);
13098         if (!vpd_data)
13099                 goto out_no_vpd;
13100
13101         i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13102                              PCI_VPD_LRDT_RO_DATA);
13103         if (i < 0)
13104                 goto out_not_found;
13105
13106         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13107         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13108         i += PCI_VPD_LRDT_TAG_SIZE;
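        /* i now indexes the first info field inside the RO section. */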
13109
13110         if (block_end > TG3_NVM_VPD_LEN)
13111                 goto out_not_found;
13112
13113         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13114                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13115         if (j > 0) {
13116                 len = pci_vpd_info_field_size(&vpd_data[j]);
13117
13118                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13119                 if (j + len > block_end || len != 4 ||
13120                     memcmp(&vpd_data[j], "1028", 4))
13121                         goto partno;
13122
13123                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13124                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13125                 if (j < 0)
13126                         goto partno;
13127
13128                 len = pci_vpd_info_field_size(&vpd_data[j]);
13129
13130                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13131                 if (j + len > block_end)
13132                         goto partno;
13133
13134                 memcpy(tp->fw_ver, &vpd_data[j], len);
13135                 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13136         }
13137
13138 partno:
13139         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13140                                       PCI_VPD_RO_KEYWORD_PARTNO);
13141         if (i < 0)
13142                 goto out_not_found;
13143
13144         len = pci_vpd_info_field_size(&vpd_data[i]);
13145
13146         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13147         if (len > TG3_BPN_SIZE ||
13148             (len + i) > TG3_NVM_VPD_LEN)
13149                 goto out_not_found;
13150
13151         memcpy(tp->board_part_number, &vpd_data[i], len);
13152
13153 out_not_found:
13154         kfree(vpd_data);
13155         if (tp->board_part_number[0])
13156                 return;
13157
13158 out_no_vpd:
13159         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13160                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13161                         strcpy(tp->board_part_number, "BCM5717");
13162                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13163                         strcpy(tp->board_part_number, "BCM5718");
13164                 else
13165                         goto nomatch;
13166         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13167                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13168                         strcpy(tp->board_part_number, "BCM57780");
13169                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13170                         strcpy(tp->board_part_number, "BCM57760");
13171                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13172                         strcpy(tp->board_part_number, "BCM57790");
13173                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13174                         strcpy(tp->board_part_number, "BCM57788");
13175                 else
13176                         goto nomatch;
13177         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13178                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13179                         strcpy(tp->board_part_number, "BCM57761");
13180                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13181                         strcpy(tp->board_part_number, "BCM57765");
13182                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13183                         strcpy(tp->board_part_number, "BCM57781");
13184                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13185                         strcpy(tp->board_part_number, "BCM57785");
13186                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13187                         strcpy(tp->board_part_number, "BCM57791");
13188                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13189                         strcpy(tp->board_part_number, "BCM57795");
13190                 else
13191                         goto nomatch;
13192         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13193                 strcpy(tp->board_part_number, "BCM95906");
13194         } else {
13195 nomatch:
13196                 strcpy(tp->board_part_number, "none");
13197         }
13198 }
13199
13200 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13201 {
13202         u32 val;
13203
13204         if (tg3_nvram_read(tp, offset, &val) ||
13205             (val & 0xfc000000) != 0x0c000000 ||
13206             tg3_nvram_read(tp, offset + 4, &val) ||
13207             val != 0)
13208                 return 0;
13209
13210         return 1;
13211 }
13212
13213 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13214 {
13215         u32 val, offset, start, ver_offset;
13216         int i, dst_off;
13217         bool newver = false;
13218
13219         if (tg3_nvram_read(tp, 0xc, &offset) ||
13220             tg3_nvram_read(tp, 0x4, &start))
13221                 return;
13222
13223         offset = tg3_nvram_logical_addr(tp, offset);
13224
13225         if (tg3_nvram_read(tp, offset, &val))
13226                 return;
13227
13228         if ((val & 0xfc000000) == 0x0c000000) {
13229                 if (tg3_nvram_read(tp, offset + 4, &val))
13230                         return;
13231
13232                 if (val == 0)
13233                         newver = true;
13234         }
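        /* Newer bootcode images embed a pointer (at offset + 8) to a
         * 16-byte ASCII version string; older images only carry a
         * major/minor pair, formatted below as "v%d.%02d".
         */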
13235
13236         dst_off = strlen(tp->fw_ver);
13237
13238         if (newver) {
13239                 if (TG3_VER_SIZE - dst_off < 16 ||
13240                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13241                         return;
13242
13243                 offset = offset + ver_offset - start;
13244                 for (i = 0; i < 16; i += 4) {
13245                         __be32 v;
13246                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13247                                 return;
13248
13249                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13250                 }
13251         } else {
13252                 u32 major, minor;
13253
13254                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13255                         return;
13256
13257                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13258                         TG3_NVM_BCVER_MAJSFT;
13259                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13260                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13261                          "v%d.%02d", major, minor);
13262         }
13263 }
13264
13265 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13266 {
13267         u32 val, major, minor;
13268
13269         /* Use native endian representation */
13270         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13271                 return;
13272
13273         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13274                 TG3_NVM_HWSB_CFG1_MAJSFT;
13275         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13276                 TG3_NVM_HWSB_CFG1_MINSFT;
13277
13278         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13279 }
13280
13281 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13282 {
13283         u32 offset, major, minor, build;
13284
13285         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13286
13287         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13288                 return;
13289
13290         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13291         case TG3_EEPROM_SB_REVISION_0:
13292                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13293                 break;
13294         case TG3_EEPROM_SB_REVISION_2:
13295                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13296                 break;
13297         case TG3_EEPROM_SB_REVISION_3:
13298                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13299                 break;
13300         case TG3_EEPROM_SB_REVISION_4:
13301                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13302                 break;
13303         case TG3_EEPROM_SB_REVISION_5:
13304                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13305                 break;
13306         case TG3_EEPROM_SB_REVISION_6:
13307                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13308                 break;
13309         default:
13310                 return;
13311         }
13312
13313         if (tg3_nvram_read(tp, offset, &val))
13314                 return;
13315
13316         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13317                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13318         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13319                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13320         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13321
13322         if (minor > 99 || build > 26)
13323                 return;
13324
13325         offset = strlen(tp->fw_ver);
13326         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13327                  " v%d.%02d", major, minor);
13328
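        /* A non-zero build number is appended as a letter:
         * build 1 becomes 'a', build 26 becomes 'z'.
         */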
13329         if (build > 0) {
13330                 offset = strlen(tp->fw_ver);
13331                 if (offset < TG3_VER_SIZE - 1)
13332                         tp->fw_ver[offset] = 'a' + build - 1;
13333         }
13334 }
13335
13336 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13337 {
13338         u32 val, offset, start;
13339         int i, vlen;
13340
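        /* Walk the NVM directory looking for the ASF initialization
         * entry, which leads to the management firmware version string
         * read below.
         */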
13341         for (offset = TG3_NVM_DIR_START;
13342              offset < TG3_NVM_DIR_END;
13343              offset += TG3_NVM_DIRENT_SIZE) {
13344                 if (tg3_nvram_read(tp, offset, &val))
13345                         return;
13346
13347                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13348                         break;
13349         }
13350
13351         if (offset == TG3_NVM_DIR_END)
13352                 return;
13353
13354         if (!tg3_flag(tp, 5705_PLUS))
13355                 start = 0x08000000;
13356         else if (tg3_nvram_read(tp, offset - 4, &start))
13357                 return;
13358
13359         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13360             !tg3_fw_img_is_valid(tp, offset) ||
13361             tg3_nvram_read(tp, offset + 8, &val))
13362                 return;
13363
13364         offset += val - start;
13365
13366         vlen = strlen(tp->fw_ver);
13367
13368         tp->fw_ver[vlen++] = ',';
13369         tp->fw_ver[vlen++] = ' ';
13370
13371         for (i = 0; i < 4; i++) {
13372                 __be32 v;
13373                 if (tg3_nvram_read_be32(tp, offset, &v))
13374                         return;
13375
13376                 offset += sizeof(v);
13377
13378                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13379                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13380                         break;
13381                 }
13382
13383                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13384                 vlen += sizeof(v);
13385         }
13386 }
13387
13388 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13389 {
13390         int vlen;
13391         u32 apedata;
13392         char *fwtype;
13393
13394         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13395                 return;
13396
13397         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13398         if (apedata != APE_SEG_SIG_MAGIC)
13399                 return;
13400
13401         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13402         if (!(apedata & APE_FW_STATUS_READY))
13403                 return;
13404
13405         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13406
13407         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13408                 tg3_flag_set(tp, APE_HAS_NCSI);
13409                 fwtype = "NCSI";
13410         } else {
13411                 fwtype = "DASH";
13412         }
13413
13414         vlen = strlen(tp->fw_ver);
13415
13416         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13417                  fwtype,
13418                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13419                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13420                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13421                  (apedata & APE_FW_VERSION_BLDMSK));
13422 }
13423
13424 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13425 {
13426         u32 val;
13427         bool vpd_vers = false;
13428
13429         if (tp->fw_ver[0] != 0)
13430                 vpd_vers = true;
13431
13432         if (tg3_flag(tp, NO_NVRAM)) {
13433                 strcat(tp->fw_ver, "sb");
13434                 return;
13435         }
13436
13437         if (tg3_nvram_read(tp, 0, &val))
13438                 return;
13439
13440         if (val == TG3_EEPROM_MAGIC)
13441                 tg3_read_bc_ver(tp);
13442         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13443                 tg3_read_sb_ver(tp, val);
13444         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13445                 tg3_read_hwsb_ver(tp);
13446         else
13447                 return;
13448
13449         if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13450                 goto done;
13451
13452         tg3_read_mgmtfw_ver(tp);
13453
13454 done:
13455         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13456 }
13457
13458 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13459
13460 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13461 {
13462         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13463                 return TG3_RX_RET_MAX_SIZE_5717;
13464         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13465                 return TG3_RX_RET_MAX_SIZE_5700;
13466         else
13467                 return TG3_RX_RET_MAX_SIZE_5705;
13468 }
13469
13470 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13471         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13472         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13473         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13474         { },
13475 };
13476
13477 static int __devinit tg3_get_invariants(struct tg3 *tp)
13478 {
13479         u32 misc_ctrl_reg;
13480         u32 pci_state_reg, grc_misc_cfg;
13481         u32 val;
13482         u16 pci_cmd;
13483         int err;
13484
13485         /* Force memory write invalidate off.  If we leave it on,
13486          * then on 5700_BX chips we have to enable a workaround.
13487          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13488          * to match the cacheline size.  The Broadcom driver has this
13489          * workaround but turns MWI off at all times, so it never
13490          * uses it.  This suggests that the workaround is insufficient.
13491          */
13492         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13493         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13494         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13495
13496         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13497          * has the register indirect write enable bit set before
13498          * we try to access any of the MMIO registers.  It is also
13499          * critical that the PCI-X hw workaround situation is decided
13500          * before that as well.
13501          */
13502         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13503                               &misc_ctrl_reg);
13504
13505         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13506                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13508                 u32 prod_id_asic_rev;
13509
13510                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13511                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13512                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13513                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13514                         pci_read_config_dword(tp->pdev,
13515                                               TG3PCI_GEN2_PRODID_ASICREV,
13516                                               &prod_id_asic_rev);
13517                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13518                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13519                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13520                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13521                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13522                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13523                         pci_read_config_dword(tp->pdev,
13524                                               TG3PCI_GEN15_PRODID_ASICREV,
13525                                               &prod_id_asic_rev);
13526                 else
13527                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13528                                               &prod_id_asic_rev);
13529
13530                 tp->pci_chip_rev_id = prod_id_asic_rev;
13531         }
13532
13533         /* Wrong chip ID in 5752 A0. This code can be removed later
13534          * as A0 is not in production.
13535          */
13536         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13537                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13538
13539         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13540          * we need to disable memory and use config. cycles
13541          * only to access all registers. The 5702/03 chips
13542          * can mistakenly decode the special cycles from the
13543          * ICH chipsets as memory write cycles, causing corruption
13544          * of register and memory space. Only certain ICH bridges
13545          * will drive special cycles with non-zero data during the
13546          * address phase which can fall within the 5703's address
13547          * range. This is not an ICH bug as the PCI spec allows
13548          * non-zero address during special cycles. However, only
13549          * these ICH bridges are known to drive non-zero addresses
13550          * during special cycles.
13551          *
13552          * Since special cycles do not cross PCI bridges, we only
13553          * enable this workaround if the 5703 is on the secondary
13554          * bus of these ICH bridges.
13555          */
13556         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13557             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13558                 static struct tg3_dev_id {
13559                         u32     vendor;
13560                         u32     device;
13561                         u32     rev;
13562                 } ich_chipsets[] = {
13563                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13564                           PCI_ANY_ID },
13565                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13566                           PCI_ANY_ID },
13567                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13568                           0xa },
13569                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13570                           PCI_ANY_ID },
13571                         { },
13572                 };
13573                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13574                 struct pci_dev *bridge = NULL;
13575
13576                 while (pci_id->vendor != 0) {
13577                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13578                                                 bridge);
13579                         if (!bridge) {
13580                                 pci_id++;
13581                                 continue;
13582                         }
13583                         if (pci_id->rev != PCI_ANY_ID) {
13584                                 if (bridge->revision > pci_id->rev)
13585                                         continue;
13586                         }
13587                         if (bridge->subordinate &&
13588                             (bridge->subordinate->number ==
13589                              tp->pdev->bus->number)) {
13590                                 tg3_flag_set(tp, ICH_WORKAROUND);
13591                                 pci_dev_put(bridge);
13592                                 break;
13593                         }
13594                 }
13595         }
13596
13597         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13598                 static struct tg3_dev_id {
13599                         u32     vendor;
13600                         u32     device;
13601                 } bridge_chipsets[] = {
13602                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13603                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13604                         { },
13605                 };
13606                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13607                 struct pci_dev *bridge = NULL;
13608
13609                 while (pci_id->vendor != 0) {
13610                         bridge = pci_get_device(pci_id->vendor,
13611                                                 pci_id->device,
13612                                                 bridge);
13613                         if (!bridge) {
13614                                 pci_id++;
13615                                 continue;
13616                         }
13617                         if (bridge->subordinate &&
13618                             (bridge->subordinate->number <=
13619                              tp->pdev->bus->number) &&
13620                             (bridge->subordinate->subordinate >=
13621                              tp->pdev->bus->number)) {
13622                                 tg3_flag_set(tp, 5701_DMA_BUG);
13623                                 pci_dev_put(bridge);
13624                                 break;
13625                         }
13626                 }
13627         }
13628
13629         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13630          * DMA addresses > 40-bit. This bridge may have other additional
13631          * 57xx devices behind it in some 4-port NIC designs for example.
13632          * Any tg3 device found behind the bridge will also need the 40-bit
13633          * DMA workaround.
13634          */
13635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13637                 tg3_flag_set(tp, 5780_CLASS);
13638                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13639                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13640         } else {
13641                 struct pci_dev *bridge = NULL;
13642
13643                 do {
13644                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13645                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13646                                                 bridge);
13647                         if (bridge && bridge->subordinate &&
13648                             (bridge->subordinate->number <=
13649                              tp->pdev->bus->number) &&
13650                             (bridge->subordinate->subordinate >=
13651                              tp->pdev->bus->number)) {
13652                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13653                                 pci_dev_put(bridge);
13654                                 break;
13655                         }
13656                 } while (bridge);
13657         }
13658
13659         /* Initialize misc host control in PCI block. */
13660         tp->misc_host_ctrl |= (misc_ctrl_reg &
13661                                MISC_HOST_CTRL_CHIPREV);
13662         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13663                                tp->misc_host_ctrl);
13664
13665         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13666             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13667             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13668             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13669                 tp->pdev_peer = tg3_find_peer(tp);
13670
13671         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13672             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13673             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13674                 tg3_flag_set(tp, 5717_PLUS);
13675
13676         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13677             tg3_flag(tp, 5717_PLUS))
13678                 tg3_flag_set(tp, 57765_PLUS);
13679
13680         /* Intentionally exclude ASIC_REV_5906 */
13681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13682             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13683             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13684             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13687             tg3_flag(tp, 57765_PLUS))
13688                 tg3_flag_set(tp, 5755_PLUS);
13689
13690         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13691             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13692             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13693             tg3_flag(tp, 5755_PLUS) ||
13694             tg3_flag(tp, 5780_CLASS))
13695                 tg3_flag_set(tp, 5750_PLUS);
13696
13697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13698             tg3_flag(tp, 5750_PLUS))
13699                 tg3_flag_set(tp, 5705_PLUS);
13700
13701         /* Determine TSO capabilities */
13702         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13703                 ; /* Do nothing. HW bug. */
13704         else if (tg3_flag(tp, 57765_PLUS))
13705                 tg3_flag_set(tp, HW_TSO_3);
13706         else if (tg3_flag(tp, 5755_PLUS) ||
13707                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13708                 tg3_flag_set(tp, HW_TSO_2);
13709         else if (tg3_flag(tp, 5750_PLUS)) {
13710                 tg3_flag_set(tp, HW_TSO_1);
13711                 tg3_flag_set(tp, TSO_BUG);
13712                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13713                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13714                         tg3_flag_clear(tp, TSO_BUG);
13715         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13716                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13717                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13718                 tg3_flag_set(tp, TSO_BUG);
13719                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13720                         tp->fw_needed = FIRMWARE_TG3TSO5;
13721                 else
13722                         tp->fw_needed = FIRMWARE_TG3TSO;
13723         }
13724
13725         /* Selectively allow TSO based on operating conditions */
13726         if (tg3_flag(tp, HW_TSO_1) ||
13727             tg3_flag(tp, HW_TSO_2) ||
13728             tg3_flag(tp, HW_TSO_3) ||
13729             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13730                 tg3_flag_set(tp, TSO_CAPABLE);
13731         else {
13732                 tg3_flag_clear(tp, TSO_CAPABLE);
13733                 tg3_flag_clear(tp, TSO_BUG);
13734                 tp->fw_needed = NULL;
13735         }
13736
13737         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13738                 tp->fw_needed = FIRMWARE_TG3;
13739
13740         tp->irq_max = 1;
13741
13742         if (tg3_flag(tp, 5750_PLUS)) {
13743                 tg3_flag_set(tp, SUPPORT_MSI);
13744                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13745                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13746                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13747                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13748                      tp->pdev_peer == tp->pdev))
13749                         tg3_flag_clear(tp, SUPPORT_MSI);
13750
13751                 if (tg3_flag(tp, 5755_PLUS) ||
13752                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13753                         tg3_flag_set(tp, 1SHOT_MSI);
13754                 }
13755
13756                 if (tg3_flag(tp, 57765_PLUS)) {
13757                         tg3_flag_set(tp, SUPPORT_MSIX);
13758                         tp->irq_max = TG3_IRQ_MAX_VECS;
13759                 }
13760         }
13761
13762         if (tg3_flag(tp, 5755_PLUS))
13763                 tg3_flag_set(tp, SHORT_DMA_BUG);
13764
13765         if (tg3_flag(tp, 5717_PLUS))
13766                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13767
13768         if (tg3_flag(tp, 57765_PLUS) &&
13769             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13770                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13771
13772         if (!tg3_flag(tp, 5705_PLUS) ||
13773             tg3_flag(tp, 5780_CLASS) ||
13774             tg3_flag(tp, USE_JUMBO_BDFLAG))
13775                 tg3_flag_set(tp, JUMBO_CAPABLE);
13776
13777         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13778                               &pci_state_reg);
13779
13780         if (pci_is_pcie(tp->pdev)) {
13781                 u16 lnkctl;
13782
13783                 tg3_flag_set(tp, PCI_EXPRESS);
13784
13785                 tp->pcie_readrq = 4096;
13786                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13787                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13788                         tp->pcie_readrq = 2048;
13789
13790                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13791
13792                 pci_read_config_word(tp->pdev,
13793                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13794                                      &lnkctl);
13795                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13796                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13797                             ASIC_REV_5906) {
13798                                 tg3_flag_clear(tp, HW_TSO_2);
13799                                 tg3_flag_clear(tp, TSO_CAPABLE);
13800                         }
13801                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13802                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13803                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13804                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13805                                 tg3_flag_set(tp, CLKREQ_BUG);
13806                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13807                         tg3_flag_set(tp, L1PLLPD_EN);
13808                 }
13809         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13810                 /* BCM5785 devices are effectively PCIe devices, and should
13811                  * follow PCIe codepaths, but do not have a PCIe capabilities
13812                  * section.
13813                  */
13814                 tg3_flag_set(tp, PCI_EXPRESS);
13815         } else if (!tg3_flag(tp, 5705_PLUS) ||
13816                    tg3_flag(tp, 5780_CLASS)) {
13817                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13818                 if (!tp->pcix_cap) {
13819                         dev_err(&tp->pdev->dev,
13820                                 "Cannot find PCI-X capability, aborting\n");
13821                         return -EIO;
13822                 }
13823
13824                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13825                         tg3_flag_set(tp, PCIX_MODE);
13826         }
13827
13828         /* If we have an AMD 762 or VIA K8T800 chipset, write
13829          * reordering by the host controller on writes to the mailbox
13830          * registers can cause major trouble.  We read back after
13831          * every mailbox register write to force the writes to be
13832          * posted to the chip in order.
13833          */
13834         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13835             !tg3_flag(tp, PCI_EXPRESS))
13836                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
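              /* The read-back flush is done by tg3_write_flush_reg32(),
               * defined elsewhere in this file.  A minimal sketch, assuming
               * tp->regs points at the mapped register BAR:
               *
               *      static void tg3_write_flush_reg32(struct tg3 *tp,
               *                                        u32 off, u32 val)
               *      {
               *              writel(val, tp->regs + off);
               *              readl(tp->regs + off);
               *      }
               *
               * The readl() forces the posted write out to the chip before
               * the function returns.
               */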
13837
13838         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13839                              &tp->pci_cacheline_sz);
13840         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13841                              &tp->pci_lat_timer);
13842         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13843             tp->pci_lat_timer < 64) {
13844                 tp->pci_lat_timer = 64;
13845                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13846                                       tp->pci_lat_timer);
13847         }
13848
13849         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13850                 /* 5700 BX chips need to have their TX producer index
13851                  * mailboxes written twice to work around a hardware bug.
13852                  */
13853                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13854
13855                 /* If we are in PCI-X mode, enable register write workaround.
13856                  *
13857                  * The workaround is to use indirect register accesses
13858                  * for all chip writes not to mailbox registers.
13859                  */
13860                 if (tg3_flag(tp, PCIX_MODE)) {
13861                         u32 pm_reg;
13862
13863                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13864
13865                         /* The chip can have its power management PCI config
13866                          * space registers clobbered due to this bug.
13867                          * So explicitly force the chip into D0 here.
13868                          */
13869                         pci_read_config_dword(tp->pdev,
13870                                               tp->pm_cap + PCI_PM_CTRL,
13871                                               &pm_reg);
13872                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13873                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13874                         pci_write_config_dword(tp->pdev,
13875                                                tp->pm_cap + PCI_PM_CTRL,
13876                                                pm_reg);
13877
13878                         /* Also, force SERR#/PERR# in PCI command. */
13879                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13880                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13881                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13882                 }
13883         }
13884
13885         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13886                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13887         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13888                 tg3_flag_set(tp, PCI_32BIT);
13889
13890         /* Chip-specific fixup from Broadcom driver */
13891         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13892             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13893                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13894                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13895         }
13896
13897         /* Default fast path register access methods */
13898         tp->read32 = tg3_read32;
13899         tp->write32 = tg3_write32;
13900         tp->read32_mbox = tg3_read32;
13901         tp->write32_mbox = tg3_write32;
13902         tp->write32_tx_mbox = tg3_write32;
13903         tp->write32_rx_mbox = tg3_write32;
13904
13905         /* Various workaround register access methods */
13906         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13907                 tp->write32 = tg3_write_indirect_reg32;
13908         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13909                  (tg3_flag(tp, PCI_EXPRESS) &&
13910                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13911                 /*
13912                  * Back-to-back register writes can cause problems on these
13913                  * chips; the workaround is to read back all register writes
13914                  * except those to the mailbox registers.
13915                  *
13916                  * See tg3_write_flush_reg32().
13917                  */
13918                 tp->write32 = tg3_write_flush_reg32;
13919         }
13920
13921         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13922                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13923                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13924                         tp->write32_rx_mbox = tg3_write_flush_reg32;
13925         }
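              /* A minimal sketch of the tx mailbox workaround wired up
               * above, assuming the helper checks the same flags:
               *
               *      static void tg3_write32_tx_mbox(struct tg3 *tp,
               *                                      u32 off, u32 val)
               *      {
               *              void __iomem *mbox = tp->regs + off;
               *
               *              writel(val, mbox);
               *              if (tg3_flag(tp, TXD_MBOX_HWBUG))
               *                      writel(val, mbox);
               *              if (tg3_flag(tp, MBOX_WRITE_REORDER))
               *                      readl(mbox);
               *      }
               *
               * i.e. the 5700 BX double write and the read-back flush for
               * reordering host bridges, combined in one helper.
               */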
13926
13927         if (tg3_flag(tp, ICH_WORKAROUND)) {
13928                 tp->read32 = tg3_read_indirect_reg32;
13929                 tp->write32 = tg3_write_indirect_reg32;
13930                 tp->read32_mbox = tg3_read_indirect_mbox;
13931                 tp->write32_mbox = tg3_write_indirect_mbox;
13932                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13933                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13934
13935                 iounmap(tp->regs);
13936                 tp->regs = NULL;
13937
13938                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13939                 pci_cmd &= ~PCI_COMMAND_MEMORY;
13940                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13941         }
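              /* With PCI_COMMAND_MEMORY cleared just above, MMIO is off
               * limits, so the indirect accessors must reach the chip
               * through its PCI config-space window.  A sketch, assuming
               * the usual TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA pair
               * serialized by tp->indirect_lock:
               *
               *      static void tg3_write_indirect_reg32(struct tg3 *tp,
               *                                           u32 off, u32 val)
               *      {
               *              unsigned long flags;
               *
               *              spin_lock_irqsave(&tp->indirect_lock, flags);
               *              pci_write_config_dword(tp->pdev,
               *                                     TG3PCI_REG_BASE_ADDR, off);
               *              pci_write_config_dword(tp->pdev,
               *                                     TG3PCI_REG_DATA, val);
               *              spin_unlock_irqrestore(&tp->indirect_lock, flags);
               *      }
               */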
13942         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13943                 tp->read32_mbox = tg3_read32_mbox_5906;
13944                 tp->write32_mbox = tg3_write32_mbox_5906;
13945                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13946                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13947         }
13948
13949         if (tp->write32 == tg3_write_indirect_reg32 ||
13950             (tg3_flag(tp, PCIX_MODE) &&
13951              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13952               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13953                 tg3_flag_set(tp, SRAM_USE_CONFIG);
13954
13955         /* Get eeprom hw config before calling tg3_set_power_state().
13956          * In particular, the TG3_FLAG_IS_NIC flag must be
13957          * determined before calling tg3_set_power_state() so that
13958          * we know whether or not to switch out of Vaux power.
13959          * When the flag is set, it means that GPIO1 is used for eeprom
13960          * write protect and also implies that it is a LOM where GPIOs
13961          * are not used to switch power.
13962          */
13963         tg3_get_eeprom_hw_cfg(tp);
13964
13965         if (tg3_flag(tp, ENABLE_APE)) {
13966                 /* Allow reads and writes to the
13967                  * APE register and memory space.
13968                  */
13969                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13970                                  PCISTATE_ALLOW_APE_SHMEM_WR |
13971                                  PCISTATE_ALLOW_APE_PSPACE_WR;
13972                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13973                                        pci_state_reg);
13974         }
13975
13976         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13977             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13978             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13979             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13980             tg3_flag(tp, 57765_PLUS))
13981                 tg3_flag_set(tp, CPMU_PRESENT);
13982
13983         /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13984          * GPIO1 driven high will bring 5700's external PHY out of reset.
13985          * It is also used as eeprom write protect on LOMs.
13986          */
13987         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13988         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13989             tg3_flag(tp, EEPROM_WRITE_PROT))
13990                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13991                                        GRC_LCLCTRL_GPIO_OUTPUT1);
13992         /* Unused GPIO3 must be driven as output on 5752 because there
13993          * are no pull-up resistors on unused GPIO pins.
13994          */
13995         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13996                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13997
13998         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13999             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14000             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14001                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14002
14003         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14004             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14005                 /* Turn off the debug UART. */
14006                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14007                 if (tg3_flag(tp, IS_NIC))
14008                         /* Keep VMain power. */
14009                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14010                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14011         }
14012
14013         /* Force the chip into D0. */
14014         err = tg3_power_up(tp);
14015         if (err) {
14016                 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
14017                 return err;
14018         }
14019
14020         /* Derive initial jumbo mode from MTU assigned in
14021          * ether_setup() via the alloc_etherdev() call.
14022          */
14023         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14024                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14025
14026         /* Determine WakeOnLan speed to use. */
14027         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14028             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14029             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14030             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14031                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14032         } else {
14033                 tg3_flag_set(tp, WOL_SPEED_100MB);
14034         }
14035
14036         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14037                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14038
14039         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14040         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14041             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14042              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14043              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14044             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14045             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14046                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14047
14048         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14049             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14050                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14051         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14052                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14053
14054         if (tg3_flag(tp, 5705_PLUS) &&
14055             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14056             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14057             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14058             !tg3_flag(tp, 57765_PLUS)) {
14059                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14060                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14061                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14062                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14063                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14064                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14065                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14066                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14067                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14068                 } else
14069                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14070         }
14071
14072         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14073             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14074                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14075                 if (tp->phy_otp == 0)
14076                         tp->phy_otp = TG3_OTP_DEFAULT;
14077         }
14078
14079         if (tg3_flag(tp, CPMU_PRESENT))
14080                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14081         else
14082                 tp->mi_mode = MAC_MI_MODE_BASE;
14083
14084         tp->coalesce_mode = 0;
14085         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14086             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14087                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14088
14089         /* Set these bits to enable statistics workaround. */
14090         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14091             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14092             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14093                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14094                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14095         }
14096
14097         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14098             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14099                 tg3_flag_set(tp, USE_PHYLIB);
14100
14101         err = tg3_mdio_init(tp);
14102         if (err)
14103                 return err;
14104
14105         /* Initialize data/descriptor byte/word swapping. */
14106         val = tr32(GRC_MODE);
14107         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14108                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14109                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14110                         GRC_MODE_B2HRX_ENABLE |
14111                         GRC_MODE_HTX2B_ENABLE |
14112                         GRC_MODE_HOST_STACKUP);
14113         else
14114                 val &= GRC_MODE_HOST_STACKUP;
14115
14116         tw32(GRC_MODE, val | tp->grc_mode);
14117
14118         tg3_switch_clocks(tp);
14119
14120         /* Clear this out for sanity. */
14121         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14122
14123         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14124                               &pci_state_reg);
14125         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14126             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14127                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14128
14129                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14130                     chiprevid == CHIPREV_ID_5701_B0 ||
14131                     chiprevid == CHIPREV_ID_5701_B2 ||
14132                     chiprevid == CHIPREV_ID_5701_B5) {
14133                         void __iomem *sram_base;
14134
14135                         /* Write some dummy words into the SRAM status block
14136                          * area and see if they read back correctly.  If the
14137                          * readback is bad, force-enable the PCIX workaround.
14138                          */
14139                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14140
14141                         writel(0x00000000, sram_base);
14142                         writel(0x00000000, sram_base + 4);
14143                         writel(0xffffffff, sram_base + 4);
14144                         if (readl(sram_base) != 0x00000000)
14145                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14146                 }
14147         }
14148
14149         udelay(50);
14150         tg3_nvram_init(tp);
14151
14152         grc_misc_cfg = tr32(GRC_MISC_CFG);
14153         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14154
14155         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14156             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14157              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14158                 tg3_flag_set(tp, IS_5788);
14159
14160         if (!tg3_flag(tp, IS_5788) &&
14161             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14162                 tg3_flag_set(tp, TAGGED_STATUS);
14163         if (tg3_flag(tp, TAGGED_STATUS)) {
14164                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14165                                       HOSTCC_MODE_CLRTICK_TXBD);
14166
14167                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14168                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14169                                        tp->misc_host_ctrl);
14170         }
14171
14172         /* Preserve the APE MAC_MODE bits */
14173         if (tg3_flag(tp, ENABLE_APE))
14174                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14175         else
14176                 tp->mac_mode = TG3_DEF_MAC_MODE;
14177
14178         /* these are limited to 10/100 only */
14179         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14180              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14181             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14182              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14183              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14184               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14185               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14186             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14187              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14188               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14189               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14190             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14191             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14192             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14193             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14194                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14195
14196         err = tg3_phy_probe(tp);
14197         if (err) {
14198                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14199                 /* ... but do not return immediately ... */
14200                 tg3_mdio_fini(tp);
14201         }
14202
14203         tg3_read_vpd(tp);
14204         tg3_read_fw_ver(tp);
14205
14206         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14207                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14208         } else {
14209                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14210                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14211                 else
14212                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14213         }
14214
14215         /* 5700 {AX,BX} chips have a broken status block link
14216          * change bit implementation, so we must use the
14217          * status register in those cases.
14218          */
14219         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14220                 tg3_flag_set(tp, USE_LINKCHG_REG);
14221         else
14222                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14223
14224         /* The led_ctrl is set during tg3_phy_probe; here we might
14225          * have to force the link status polling mechanism based
14226          * upon subsystem IDs.
14227          */
14228         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14229             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14230             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14231                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14232                 tg3_flag_set(tp, USE_LINKCHG_REG);
14233         }
14234
14235         /* For all SERDES we poll the MAC status register. */
14236         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14237                 tg3_flag_set(tp, POLL_SERDES);
14238         else
14239                 tg3_flag_clear(tp, POLL_SERDES);
14240
14241         tp->rx_offset = NET_IP_ALIGN;
14242         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14243         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14244             tg3_flag(tp, PCIX_MODE)) {
14245                 tp->rx_offset = 0;
14246 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
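                      /* With rx_offset forced to 0 the IP header in received
                       * frames ends up misaligned, so set the copy threshold
                       * to its maximum and copy every packet into an aligned
                       * buffer instead.
                       */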
14247                 tp->rx_copy_thresh = ~(u16)0;
14248 #endif
14249         }
14250
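              /* The ring sizes are powers of two, so the hot path can wrap
               * ring indices with "index & mask" instead of a modulo.
               */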
14251         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14252         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14253         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14254
14255         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14256
14257         /* Increment the rx prod index on the rx std ring by at most
14258          * 8 for these chips to work around hw errata.
14259          */
14260         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14261             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14262             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14263                 tp->rx_std_max_post = 8;
14264
14265         if (tg3_flag(tp, ASPM_WORKAROUND))
14266                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14267                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14268
14269         return err;
14270 }
14271
14272 #ifdef CONFIG_SPARC
14273 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14274 {
14275         struct net_device *dev = tp->dev;
14276         struct pci_dev *pdev = tp->pdev;
14277         struct device_node *dp = pci_device_to_OF_node(pdev);
14278         const unsigned char *addr;
14279         int len;
14280
14281         addr = of_get_property(dp, "local-mac-address", &len);
14282         if (addr && len == 6) {
14283                 memcpy(dev->dev_addr, addr, 6);
14284                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14285                 return 0;
14286         }
14287         return -ENODEV;
14288 }
14289
14290 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14291 {
14292         struct net_device *dev = tp->dev;
14293
14294         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14295         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14296         return 0;
14297 }
14298 #endif
14299
14300 static int __devinit tg3_get_device_address(struct tg3 *tp)
14301 {
14302         struct net_device *dev = tp->dev;
14303         u32 hi, lo, mac_offset;
14304         int addr_ok = 0;
14305
14306 #ifdef CONFIG_SPARC
14307         if (!tg3_get_macaddr_sparc(tp))
14308                 return 0;
14309 #endif
14310
14311         mac_offset = 0x7c;
14312         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14313             tg3_flag(tp, 5780_CLASS)) {
14314                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14315                         mac_offset = 0xcc;
14316                 if (tg3_nvram_lock(tp))
14317                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14318                 else
14319                         tg3_nvram_unlock(tp);
14320         } else if (tg3_flag(tp, 5717_PLUS)) {
14321                 if (PCI_FUNC(tp->pdev->devfn) & 1)
14322                         mac_offset = 0xcc;
14323                 if (PCI_FUNC(tp->pdev->devfn) > 1)
14324                         mac_offset += 0x18c;
14325         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14326                 mac_offset = 0x10;
14327
14328         /* First try to get it from the MAC address mailbox. */
14329         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
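              /* 0x484b is ASCII "HK", apparently the bootcode's marker for
               * a valid MAC address in the mailbox.
               */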
14330         if ((hi >> 16) == 0x484b) {
14331                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14332                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14333
14334                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14335                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14336                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14337                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14338                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14339
14340                 /* Some old bootcode may report a 0 MAC address in SRAM */
14341                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14342         }
14343         if (!addr_ok) {
14344                 /* Next, try NVRAM. */
14345                 if (!tg3_flag(tp, NO_NVRAM) &&
14346                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14347                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14348                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14349                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14350                 }
14351                 /* Finally just fetch it out of the MAC control regs. */
14352                 else {
14353                         hi = tr32(MAC_ADDR_0_HIGH);
14354                         lo = tr32(MAC_ADDR_0_LOW);
14355
14356                         dev->dev_addr[5] = lo & 0xff;
14357                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14358                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14359                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14360                         dev->dev_addr[1] = hi & 0xff;
14361                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14362                 }
14363         }
14364
14365         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14366 #ifdef CONFIG_SPARC
14367                 if (!tg3_get_default_macaddr_sparc(tp))
14368                         return 0;
14369 #endif
14370                 return -EINVAL;
14371         }
14372         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14373         return 0;
14374 }
14375
14376 #define BOUNDARY_SINGLE_CACHELINE       1
14377 #define BOUNDARY_MULTI_CACHELINE        2
14378
14379 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14380 {
14381         int cacheline_size;
14382         u8 byte;
14383         int goal;
14384
14385         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
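              /* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence the
               * multiply by 4; a value of 0 means the register was never
               * programmed, so fall back to the largest boundary, 1024.
               */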
14386         if (byte == 0)
14387                 cacheline_size = 1024;
14388         else
14389                 cacheline_size = (int) byte * 4;
14390
14391         /* On 5703 and later chips, the boundary bits have no
14392          * effect.
14393          */
14394         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14395             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14396             !tg3_flag(tp, PCI_EXPRESS))
14397                 goto out;
14398
14399 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14400         goal = BOUNDARY_MULTI_CACHELINE;
14401 #else
14402 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14403         goal = BOUNDARY_SINGLE_CACHELINE;
14404 #else
14405         goal = 0;
14406 #endif
14407 #endif
14408
14409         if (tg3_flag(tp, 57765_PLUS)) {
14410                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14411                 goto out;
14412         }
14413
14414         if (!goal)
14415                 goto out;
14416
14417         /* PCI controllers on most RISC systems tend to disconnect
14418          * when a device tries to burst across a cache-line boundary.
14419          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14420          *
14421          * Unfortunately, for PCI-E there are only limited
14422          * write-side controls for this, and thus for reads
14423          * we will still get the disconnects.  We'll also waste
14424          * these PCI cycles for both read and write for chips
14425          * other than 5700 and 5701, which do not implement the
14426          * boundary bits.
14427          */
14428         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14429                 switch (cacheline_size) {
14430                 case 16:
14431                 case 32:
14432                 case 64:
14433                 case 128:
14434                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14435                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14436                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14437                         } else {
14438                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14439                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14440                         }
14441                         break;
14442
14443                 case 256:
14444                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14445                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14446                         break;
14447
14448                 default:
14449                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14450                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14451                         break;
14452                 }
14453         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14454                 switch (cacheline_size) {
14455                 case 16:
14456                 case 32:
14457                 case 64:
14458                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14459                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14460                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14461                                 break;
14462                         }
14463                         /* fallthrough */
14464                 case 128:
14465                 default:
14466                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14467                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14468                         break;
14469                 }
14470         } else {
14471                 switch (cacheline_size) {
14472                 case 16:
14473                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14474                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14475                                         DMA_RWCTRL_WRITE_BNDRY_16);
14476                                 break;
14477                         }
14478                         /* fallthrough */
14479                 case 32:
14480                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14481                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14482                                         DMA_RWCTRL_WRITE_BNDRY_32);
14483                                 break;
14484                         }
14485                         /* fallthrough */
14486                 case 64:
14487                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14488                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14489                                         DMA_RWCTRL_WRITE_BNDRY_64);
14490                                 break;
14491                         }
14492                         /* fallthrough */
14493                 case 128:
14494                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14495                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14496                                         DMA_RWCTRL_WRITE_BNDRY_128);
14497                                 break;
14498                         }
14499                         /* fallthrough */
14500                 case 256:
14501                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14502                                 DMA_RWCTRL_WRITE_BNDRY_256);
14503                         break;
14504                 case 512:
14505                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14506                                 DMA_RWCTRL_WRITE_BNDRY_512);
14507                         break;
14508                 case 1024:
14509                 default:
14510                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14511                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14512                         break;
14513                 }
14514         }
14515
14516 out:
14517         return val;
14518 }
14519
14520 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14521 {
14522         struct tg3_internal_buffer_desc test_desc;
14523         u32 sram_dma_descs;
14524         int i, ret;
14525
14526         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14527
14528         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14529         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14530         tw32(RDMAC_STATUS, 0);
14531         tw32(WDMAC_STATUS, 0);
14532
14533         tw32(BUFMGR_MODE, 0);
14534         tw32(FTQ_RESET, 0);
14535
14536         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14537         test_desc.addr_lo = buf_dma & 0xffffffff;
14538         test_desc.nic_mbuf = 0x00002100;
14539         test_desc.len = size;
14540
14541         /*
14542          * HP ZX1 systems were seeing test failures with 5701 cards running
14543          * at 33MHz the *second* time the tg3 driver was loaded after an
14544          * initial scan.
14545          *
14546          * Broadcom tells me:
14547          *   ...the DMA engine is connected to the GRC block and a DMA
14548          *   reset may affect the GRC block in some unpredictable way...
14549          *   The behavior of resets to individual blocks has not been tested.
14550          *
14551          * Broadcom noted the GRC reset will also reset all sub-components.
14552          */
14553         if (to_device) {
14554                 test_desc.cqid_sqid = (13 << 8) | 2;
14555
14556                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14557                 udelay(40);
14558         } else {
14559                 test_desc.cqid_sqid = (16 << 8) | 7;
14560
14561                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14562                 udelay(40);
14563         }
14564         test_desc.flags = 0x00000005;
14565
14566         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14567                 u32 val;
14568
14569                 val = *(((u32 *)&test_desc) + i);
14570                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14571                                        sram_dma_descs + (i * sizeof(u32)));
14572                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14573         }
14574         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14575
14576         if (to_device)
14577                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14578         else
14579                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14580
14581         ret = -ENODEV;
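              /* Poll for up to 40 * 100us (about 4 ms); the low 16 bits of
               * the completion FIFO echo our descriptor address once the
               * DMA engine has finished with it.
               */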
14582         for (i = 0; i < 40; i++) {
14583                 u32 val;
14584
14585                 if (to_device)
14586                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14587                 else
14588                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14589                 if ((val & 0xffff) == sram_dma_descs) {
14590                         ret = 0;
14591                         break;
14592                 }
14593
14594                 udelay(100);
14595         }
14596
14597         return ret;
14598 }
14599
14600 #define TEST_BUFFER_SIZE        0x2000
14601
14602 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14603         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14604         { },
14605 };
14606
14607 static int __devinit tg3_test_dma(struct tg3 *tp)
14608 {
14609         dma_addr_t buf_dma;
14610         u32 *buf, saved_dma_rwctrl;
14611         int ret = 0;
14612
14613         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14614                                  &buf_dma, GFP_KERNEL);
14615         if (!buf) {
14616                 ret = -ENOMEM;
14617                 goto out_nofree;
14618         }
14619
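              /* 0x7 and 0x6 below are the PCI bus command encodings for
               * Memory Write and Memory Read, respectively.
               */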
14620         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14621                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14622
14623         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14624
14625         if (tg3_flag(tp, 57765_PLUS))
14626                 goto out;
14627
14628         if (tg3_flag(tp, PCI_EXPRESS)) {
14629                 /* DMA read watermark not used on PCIE */
14630                 tp->dma_rwctrl |= 0x00180000;
14631         } else if (!tg3_flag(tp, PCIX_MODE)) {
14632                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14633                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14634                         tp->dma_rwctrl |= 0x003f0000;
14635                 else
14636                         tp->dma_rwctrl |= 0x003f000f;
14637         } else {
14638                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14639                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14640                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14641                         u32 read_water = 0x7;
14642
14643                         /* If the 5704 is behind the EPB bridge, we can
14644                          * do the less restrictive ONE_DMA workaround for
14645                          * better performance.
14646                          */
14647                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14648                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14649                                 tp->dma_rwctrl |= 0x8000;
14650                         else if (ccval == 0x6 || ccval == 0x7)
14651                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14652
14653                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14654                                 read_water = 4;
14655                         /* Set bit 23 to enable PCIX hw bug fix */
14656                         tp->dma_rwctrl |=
14657                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14658                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14659                                 (1 << 23);
14660                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14661                         /* 5780 always in PCIX mode */
14662                         tp->dma_rwctrl |= 0x00144000;
14663                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14664                         /* 5714 always in PCIX mode */
14665                         tp->dma_rwctrl |= 0x00148000;
14666                 } else {
14667                         tp->dma_rwctrl |= 0x001b000f;
14668                 }
14669         }
14670
14671         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14672             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14673                 tp->dma_rwctrl &= 0xfffffff0;
14674
14675         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14676             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14677                 /* Remove this if it causes problems for some boards. */
14678                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14679
14680                 /* On 5700/5701 chips, we need to set this bit.
14681                  * Otherwise the chip will issue cacheline transactions
14682                  * to streamable DMA memory without all of the byte
14683                  * enables asserted.  This is an error on several
14684                  * RISC PCI controllers, in particular sparc64.
14685                  *
14686                  * On 5703/5704 chips, this bit has been reassigned
14687                  * a different meaning.  In particular, it is used
14688                  * on those chips to enable a PCI-X workaround.
14689                  */
14690                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14691         }
14692
14693         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14694
14695 #if 0
14696         /* Unneeded, already done by tg3_get_invariants.  */
14697         tg3_switch_clocks(tp);
14698 #endif
14699
14700         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14701             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14702                 goto out;
14703
14704         /* It is best to perform the DMA test with maximum write burst size
14705          * to expose the 5700/5701 write DMA bug.
14706          */
14707         saved_dma_rwctrl = tp->dma_rwctrl;
14708         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14709         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14710
14711         while (1) {
14712                 u32 *p = buf, i;
14713
14714                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14715                         p[i] = i;
14716
14717                 /* Send the buffer to the chip. */
14718                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14719                 if (ret) {
14720                         dev_err(&tp->pdev->dev,
14721                                 "%s: Buffer write failed. err = %d\n",
14722                                 __func__, ret);
14723                         break;
14724                 }
14725
14726 #if 0
14727                 /* Validate that the data reached card RAM correctly. */
14728                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14729                         u32 val;
14730                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14731                         if (le32_to_cpu(val) != p[i]) {
14732                                 dev_err(&tp->pdev->dev,
14733                                         "%s: Buffer corrupted on device! "
14734                                         "(%d != %d)\n", __func__, le32_to_cpu(val), i);
14735                                 /* ret = -ENODEV here? */
14736                         }
14737                         p[i] = 0;
14738                 }
14739 #endif
14740                 /* Now read it back. */
14741                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14742                 if (ret) {
14743                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14744                                 "err = %d\n", __func__, ret);
14745                         break;
14746                 }
14747
14748                 /* Verify it. */
14749                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14750                         if (p[i] == i)
14751                                 continue;
14752
14753                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14754                             DMA_RWCTRL_WRITE_BNDRY_16) {
14755                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14756                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14757                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14758                                 break;
14759                         } else {
14760                                 dev_err(&tp->pdev->dev,
14761                                         "%s: Buffer corrupted on read back! "
14762                                         "(%d != %d)\n", __func__, p[i], i);
14763                                 ret = -ENODEV;
14764                                 goto out;
14765                         }
14766                 }
14767
14768                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14769                         /* Success. */
14770                         ret = 0;
14771                         break;
14772                 }
14773         }
14774         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14775             DMA_RWCTRL_WRITE_BNDRY_16) {
14776                 /* DMA test passed without adjusting the DMA boundary;
14777                  * now look for chipsets that are known to expose the
14778                  * DMA bug without failing the test.
14779                  */
14780                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14781                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14782                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14783                 } else {
14784                         /* Safe to use the calculated DMA boundary. */
14785                         tp->dma_rwctrl = saved_dma_rwctrl;
14786                 }
14787
14788                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14789         }
14790
14791 out:
14792         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14793 out_nofree:
14794         return ret;
14795 }
14796
14797 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14798 {
14799         if (tg3_flag(tp, 57765_PLUS)) {
14800                 tp->bufmgr_config.mbuf_read_dma_low_water =
14801                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14802                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14803                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14804                 tp->bufmgr_config.mbuf_high_water =
14805                         DEFAULT_MB_HIGH_WATER_57765;
14806
14807                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14808                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14809                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14810                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14811                 tp->bufmgr_config.mbuf_high_water_jumbo =
14812                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14813         } else if (tg3_flag(tp, 5705_PLUS)) {
14814                 tp->bufmgr_config.mbuf_read_dma_low_water =
14815                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14816                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14817                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14818                 tp->bufmgr_config.mbuf_high_water =
14819                         DEFAULT_MB_HIGH_WATER_5705;
14820                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tp->bufmgr_config.mbuf_mac_rx_low_water =
                                DEFAULT_MB_MACRX_LOW_WATER_5906;
                        tp->bufmgr_config.mbuf_high_water =
                                DEFAULT_MB_HIGH_WATER_5906;
                }

                tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
                        DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
                tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
                        DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
                tp->bufmgr_config.mbuf_high_water_jumbo =
                        DEFAULT_MB_HIGH_WATER_JUMBO_5780;
        } else {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
                        DEFAULT_MB_MACRX_LOW_WATER;
                tp->bufmgr_config.mbuf_high_water =
                        DEFAULT_MB_HIGH_WATER;

                tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
                        DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
                tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
                        DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
                tp->bufmgr_config.mbuf_high_water_jumbo =
                        DEFAULT_MB_HIGH_WATER_JUMBO;
        }

        tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
        tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
        switch (tp->phy_id & TG3_PHY_ID_MASK) {
        case TG3_PHY_ID_BCM5400:        return "5400";
        case TG3_PHY_ID_BCM5401:        return "5401";
        case TG3_PHY_ID_BCM5411:        return "5411";
        case TG3_PHY_ID_BCM5701:        return "5701";
        case TG3_PHY_ID_BCM5703:        return "5703";
        case TG3_PHY_ID_BCM5704:        return "5704";
        case TG3_PHY_ID_BCM5705:        return "5705";
        case TG3_PHY_ID_BCM5750:        return "5750";
        case TG3_PHY_ID_BCM5752:        return "5752";
        case TG3_PHY_ID_BCM5714:        return "5714";
        case TG3_PHY_ID_BCM5780:        return "5780";
        case TG3_PHY_ID_BCM5755:        return "5755";
        case TG3_PHY_ID_BCM5787:        return "5787";
        case TG3_PHY_ID_BCM5784:        return "5784";
        case TG3_PHY_ID_BCM5756:        return "5722/5756";
        case TG3_PHY_ID_BCM5906:        return "5906";
        case TG3_PHY_ID_BCM5761:        return "5761";
        case TG3_PHY_ID_BCM5718C:       return "5718C";
        case TG3_PHY_ID_BCM5718S:       return "5718S";
        case TG3_PHY_ID_BCM57765:       return "57765";
        case TG3_PHY_ID_BCM5719C:       return "5719C";
        case TG3_PHY_ID_BCM5720C:       return "5720C";
        case TG3_PHY_ID_BCM8002:        return "8002/serdes";
        case 0:                 return "serdes";
        default:                return "unknown";
        }
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
        if (tg3_flag(tp, PCI_EXPRESS)) {
                strcpy(str, "PCI Express");
                return str;
        } else if (tg3_flag(tp, PCIX_MODE)) {
                u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

                strcpy(str, "PCIX:");

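                /*
                 * A minimal decode of the sampled PCI-X clock: the low
                 * bits of TG3PCI_CLOCK_CTRL map onto the bus speeds
                 * checked below, with the 5704CIOBE board treated as a
                 * fixed 133MHz design.
                 */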
                if ((clock_ctrl == 7) ||
                    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
                     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
                        strcat(str, "133MHz");
                else if (clock_ctrl == 0)
                        strcat(str, "33MHz");
                else if (clock_ctrl == 2)
                        strcat(str, "50MHz");
                else if (clock_ctrl == 4)
                        strcat(str, "66MHz");
                else if (clock_ctrl == 6)
                        strcat(str, "100MHz");
        } else {
                strcpy(str, "PCI:");
                if (tg3_flag(tp, PCI_HIGH_SPEED))
                        strcat(str, "66MHz");
                else
                        strcat(str, "33MHz");
        }
        if (tg3_flag(tp, PCI_32BIT))
                strcat(str, ":32-bit");
        else
                strcat(str, ":64-bit");
        return str;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;

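        /*
         * devfn packs the PCI slot in its upper five bits and the
         * function number in its lower three.  Masking off the function
         * bits gives function 0 of this slot, and the loop below scans
         * all eight functions for the other port of a dual-port device.
         */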
        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, devnr | func);
                if (peer && peer != tp->pdev)
                        break;
                pci_dev_put(peer);
        }
        /* The 5704 can be configured in single-port mode; set peer to
         * tp->pdev in that case.
         */
        if (!peer) {
                peer = tp->pdev;
                return peer;
        }

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other.
         */
        pci_dev_put(peer);

        return peer;
}

static void __devinit tg3_init_coal(struct tg3 *tp)
{
        struct ethtool_coalesce *ec = &tp->coal;

        memset(ec, 0, sizeof(*ec));
        ec->cmd = ETHTOOL_GCOALESCE;
        ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
        ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
        ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
        ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
        ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
        ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
        ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
        ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
        ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

        if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
                                 HOSTCC_MODE_CLRTICK_TXBD)) {
                ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
                ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
                ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
                ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
        }

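        /*
         * Presumably the 5705 and newer parts lack the per-interrupt
         * coalescing and statistics-tick controls, so zero those fields
         * rather than program unsupported registers.
         */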
        if (tg3_flag(tp, 5705_PLUS)) {
                ec->rx_coalesce_usecs_irq = 0;
                ec->tx_coalesce_usecs_irq = 0;
                ec->stats_block_coalesce_usecs = 0;
        }
}

static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
        .ndo_start_xmit         = tg3_start_xmit,
        .ndo_get_stats64        = tg3_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = tg3_set_rx_mode,
        .ndo_set_mac_address    = tg3_set_mac_addr,
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
        .ndo_fix_features       = tg3_fix_features,
        .ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
#endif
};

static int __devinit tg3_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct tg3 *tp;
        int i, err, pm_cap;
        u32 sndmbx, rcvmbx, intmbx;
        char str[40];
        u64 dma_mask, persist_dma_mask;
        u32 features = 0;

        printk_once(KERN_INFO "%s\n", version);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        /* Find power-management capability. */
        pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (pm_cap == 0) {
                dev_err(&pdev->dev,
                        "Cannot find Power Management capability, aborting\n");
                err = -EIO;
                goto err_out_free_res;
        }

        dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
        if (!dev) {
                dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
                err = -ENOMEM;
                goto err_out_free_res;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);

        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->pm_cap = pm_cap;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;

        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;

        /* The word/byte swap controls here govern register access byte
         * swapping.  DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task);

        tp->regs = pci_ioremap_bar(pdev, BAR_0);
        if (!tp->regs) {
                dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->netdev_ops = &tg3_netdev_ops;
        dev->irq = pdev->irq;

        err = tg3_get_invariants(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Problem fetching invariants of chip, aborting\n");
                goto err_out_iounmap;
        }

        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in tg3_start_xmit().
         */
        if (tg3_flag(tp, IS_5788))
                persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
        else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
                dma_mask = DMA_BIT_MASK(64);
#endif
        } else
                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

        /* Configure DMA attributes. */
        if (dma_mask > DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, dma_mask);
                if (!err) {
                        features |= NETIF_F_HIGHDMA;
                        err = pci_set_consistent_dma_mask(pdev,
                                                          persist_dma_mask);
                        if (err < 0) {
                                dev_err(&pdev->dev, "Unable to obtain 64 bit "
                                        "DMA for consistent allocations\n");
                                goto err_out_iounmap;
                        }
                }
        }
        if (err || dma_mask == DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");
                        goto err_out_iounmap;
                }
        }

        tg3_init_bufmgr_config(tp);

        features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

        /* 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
                features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

                if (tg3_flag(tp, 5755_PLUS))
                        features |= NETIF_F_IPV6_CSUM;
        }

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
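        /* From userspace that would be, e.g., "ethtool -K <iface> tso on". */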
        if ((tg3_flag(tp, HW_TSO_1) ||
             tg3_flag(tp, HW_TSO_2) ||
             tg3_flag(tp, HW_TSO_3)) &&
            (features & NETIF_F_IP_CSUM))
                features |= NETIF_F_TSO;
        if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
                if (features & NETIF_F_IPV6_CSUM)
                        features |= NETIF_F_TSO6;
                if (tg3_flag(tp, HW_TSO_3) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
                     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
                        features |= NETIF_F_TSO_ECN;
        }

        dev->features |= features;
        dev->vlan_features |= features;

        /*
         * Add loopback capability only for the subset of devices that
         * support MAC-LOOPBACK.  Eventually this needs to be enhanced to
         * allow INT-PHY loopback for the remaining devices.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT))
                /* Add the loopback capability */
                features |= NETIF_F_LOOPBACK;

        dev->hw_features |= features;

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
            !tg3_flag(tp, TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tg3_flag_set(tp, MAX_RXPEND_64);
                tp->rx_pending = 63;
        }

        err = tg3_get_device_address(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Could not obtain valid ethernet address, aborting\n");
                goto err_out_iounmap;
        }

        if (tg3_flag(tp, ENABLE_APE)) {
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
                        dev_err(&pdev->dev,
                                "Cannot map APE registers, aborting\n");
                        err = -ENOMEM;
                        goto err_out_iounmap;
                }

                tg3_ape_lock_init(tp);

                if (tg3_flag(tp, ENABLE_ASF))
                        tg3_read_dash_ver(tp);
        }

        /*
         * Reset the chip in case an UNDI or EFI driver did not shut it
         * down.  The DMA self test will enable WDMAC, and we would see
         * (spurious) pending DMA on the PCI bus at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }

        err = tg3_test_dma(tp);
        if (err) {
                dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_apeunmap;
        }

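        /*
         * Seed every NAPI context with its interrupt, rx-return consumer
         * and tx producer mailbox addresses, starting from the vector 0
         * mailboxes below.
         */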
        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->tp = tp;
                tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

                tnapi->int_mbox = intmbx;
                if (i < 4)
                        intmbx += 0x8;
                else
                        intmbx += 0x4;

                tnapi->consmbox = rcvmbx;
                tnapi->prodmbox = sndmbx;

                if (i)
                        tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
                else
                        tnapi->coal_now = HOSTCC_MODE_NOW;

                if (!tg3_flag(tp, SUPPORT_MSIX))
                        break;

                /*
                 * If we support MSIX, we'll be using RSS.  If we're using
                 * RSS, the first vector only handles link interrupts and the
                 * remaining vectors handle rx and tx interrupts.  Reuse the
                 * mailbox values for the next iteration.  The values we set
                 * up above are still useful for the single vectored mode.
                 */
                if (!i)
                        continue;

                rcvmbx += 0x8;

                if (sndmbx & 0x4)
                        sndmbx -= 0x4;
                else
                        sndmbx += 0xc;
        }

        tg3_init_coal(tp);

        pci_set_drvdata(pdev, dev);

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tp->pci_chip_rev_id,
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                struct phy_device *phydev;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                netdev_info(dev,
                            "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
                            phydev->drv->name, dev_name(&phydev->dev));
        } else {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        pci_save_state(pdev);

        return 0;

err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                if (tp->fw)
                        release_firmware(tp->fw);

                cancel_work_sync(&tp->reset_task);

                if (!tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        flush_work_sync(&tp->reset_task);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

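        /*
         * Ask the chip to enter its low-power state.  If that fails we
         * restart the hardware below so the device is left in a usable
         * state rather than half suspended.
         */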
        err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, 1);
                if (err2)
                        goto out;

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

        return err;
}

static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        if (err)
                goto out;

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        return err;
}

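/*
 * SIMPLE_DEV_PM_OPS wires the suspend/resume pair into all of the system
 * sleep hooks (suspend, resume, freeze, thaw, poweroff, restore).
 */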
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);
        tg3_flag_clear(tp, RESTART_TIMER);

        /* Want to make sure that the reset task doesn't run */
        cancel_work_sync(&tp->reset_task);
        tg3_flag_clear(tp, TX_RECOVERY_PENDING);
        tg3_flag_clear(tp, RESTART_TIMER);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
        if (state == pci_channel_io_perm_failure)
                err = PCI_ERS_RESULT_DISCONNECT;
        else
                pci_disable_device(pdev);

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

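        /*
         * Restore the config space that the bus reset clobbered, then
         * re-save it so a later recovery pass starts from this
         * known-good copy.
         */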
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err) {
                netdev_err(netdev, "Failed to restore register access.\n");
                goto done;
        }

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        tg3_full_unlock(tp);
        if (err) {
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

        tg3_phy_start(tp);

done:
        rtnl_unlock();
}

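/*
 * AER recovery walks these hooks in order: error_detected quiesces the
 * device, slot_reset re-initialises it after the bus reset, and resume
 * restarts traffic.
 */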
static struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .err_handler    = &tg3_err_handler,
        .driver.pm      = TG3_PM_OPS,
};

static int __init tg3_init(void)
{
        return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);