/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

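/* Illustration only (not part of the original driver): the helpers
 * above let call sites name a flag without its TG3_FLAG_ prefix, so
 *
 *	tg3_flag(tp, ENABLE_APE)
 *
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, tp->tg3_flags), i.e. an
 * atomic test_bit() on the device's flag bitmap; tg3_flag_set() and
 * tg3_flag_clear() map onto set_bit() and clear_bit() the same way,
 * and the inline wrappers give the compiler type checking on the
 * enum TG3_FLAGS argument.
 */
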
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     123
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "March 21, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
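
/* A minimal worked example (illustrative only): because
 * TG3_TX_RING_SIZE below is a power of two (512), wrapping a ring
 * index such as
 *
 *	next = (idx + 1) % TG3_TX_RING_SIZE;
 *
 * is equivalent to
 *
 *	next = (idx + 1) & (TG3_TX_RING_SIZE - 1);
 *
 * which is exactly the single-AND form the NEXT_TX() macro below uses.
 */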

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
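
/* Usage sketch (illustrative, not part of the original file): a GPIO
 * power-switch toggle through GRC_LOCAL_CTRL needs the settle time
 * honored, so callers use the flushing/waiting form, e.g.
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * which resolves to _tw32_flush(tp, GRC_LOCAL_CTRL, val, 100) and
 * guarantees the 100 usec wait whether the write is posted or not.
 */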

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't have any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
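
/* Typical locking pattern (illustrative; tg3_ape_send_event() below
 * uses exactly this shape): APE shared-memory accesses are bracketed
 * by the MEM lock:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;		// lock not granted within 1 msec
 *	... tg3_ape_read32() / tg3_ape_write32() accesses ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */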

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
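
/* Example (illustrative only; MDIO_MMD_AN and the MDIO_AN_EEE_ADV_*
 * names come from <linux/mdio.h>, not from this file): advertising EEE
 * through the clause-45 helpers above would look like
 *
 *	tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
 *			   MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
 *
 * and the matching tg3_phy_cl45_read() retrieves the same register for
 * verification.
 */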

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)
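
/* Common pattern (illustrative): DSP accesses that need the shadow
 * auxiliary control set up are bracketed by the enable/disable pair
 * above:
 *
 *	if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *		tg3_phydsp_write(tp, reg, val);
 *		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *	}
 */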

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}
1404
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407         if (tg3_flag(tp, MDIOBUS_INITED)) {
1408                 tg3_flag_clear(tp, MDIOBUS_INITED);
1409                 mdiobus_unregister(tp->mdio_bus);
1410                 mdiobus_free(tp->mdio_bus);
1411         }
1412 }
1413
1414 /* tp->lock is held. */
1415 static inline void tg3_generate_fw_event(struct tg3 *tp)
1416 {
1417         u32 val;
1418
1419         val = tr32(GRC_RX_CPU_EVENT);
1420         val |= GRC_RX_CPU_DRIVER_EVENT;
1421         tw32_f(GRC_RX_CPU_EVENT, val);
1422
1423         tp->last_event_jiffies = jiffies;
1424 }
1425
1426 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1427
1428 /* tp->lock is held. */
1429 static void tg3_wait_for_event_ack(struct tg3 *tp)
1430 {
1431         int i;
1432         unsigned int delay_cnt;
1433         long time_remain;
1434
1435         /* If enough time has passed, no wait is necessary. */
1436         time_remain = (long)(tp->last_event_jiffies + 1 +
1437                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1438                       (long)jiffies;
1439         if (time_remain < 0)
1440                 return;
1441
1442         /* Check if we can shorten the wait time. */
1443         delay_cnt = jiffies_to_usecs(time_remain);
1444         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1445                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1446         delay_cnt = (delay_cnt >> 3) + 1;
1447
1448         for (i = 0; i < delay_cnt; i++) {
1449                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1450                         break;
1451                 udelay(8);
1452         }
1453 }
1454
1455 /* tp->lock is held. */
1456 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1457 {
1458         u32 reg, val;
1459
1460         val = 0;
1461         if (!tg3_readphy(tp, MII_BMCR, &reg))
1462                 val = reg << 16;
1463         if (!tg3_readphy(tp, MII_BMSR, &reg))
1464                 val |= (reg & 0xffff);
1465         *data++ = val;
1466
1467         val = 0;
1468         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1469                 val = reg << 16;
1470         if (!tg3_readphy(tp, MII_LPA, &reg))
1471                 val |= (reg & 0xffff);
1472         *data++ = val;
1473
1474         val = 0;
1475         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1476                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1477                         val = reg << 16;
1478                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1479                         val |= (reg & 0xffff);
1480         }
1481         *data++ = val;
1482
1483         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1484                 val = reg << 16;
1485         else
1486                 val = 0;
1487         *data++ = val;
1488 }
1489
1490 /* tp->lock is held. */
1491 static void tg3_ump_link_report(struct tg3 *tp)
1492 {
1493         u32 data[4];
1494
1495         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1496                 return;
1497
1498         tg3_phy_gather_ump_data(tp, data);
1499
1500         tg3_wait_for_event_ack(tp);
1501
1502         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1503         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1504         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1505         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1506         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1507         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1508
1509         tg3_generate_fw_event(tp);
1510 }
1511
1512 /* tp->lock is held. */
1513 static void tg3_stop_fw(struct tg3 *tp)
1514 {
1515         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1516                 /* Wait for RX CPU to ACK the previous event. */
1517                 tg3_wait_for_event_ack(tp);
1518
1519                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1520
1521                 tg3_generate_fw_event(tp);
1522
1523                 /* Wait for RX CPU to ACK this event. */
1524                 tg3_wait_for_event_ack(tp);
1525         }
1526 }
1527
1528 /* tp->lock is held. */
1529 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1530 {
1531         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1532                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1533
1534         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1535                 switch (kind) {
1536                 case RESET_KIND_INIT:
1537                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1538                                       DRV_STATE_START);
1539                         break;
1540
1541                 case RESET_KIND_SHUTDOWN:
1542                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1543                                       DRV_STATE_UNLOAD);
1544                         break;
1545
1546                 case RESET_KIND_SUSPEND:
1547                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1548                                       DRV_STATE_SUSPEND);
1549                         break;
1550
1551                 default:
1552                         break;
1553                 }
1554         }
1555
1556         if (kind == RESET_KIND_INIT ||
1557             kind == RESET_KIND_SUSPEND)
1558                 tg3_ape_driver_state_change(tp, kind);
1559 }
1560
1561 /* tp->lock is held. */
1562 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1563 {
1564         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1565                 switch (kind) {
1566                 case RESET_KIND_INIT:
1567                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1568                                       DRV_STATE_START_DONE);
1569                         break;
1570
1571                 case RESET_KIND_SHUTDOWN:
1572                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1573                                       DRV_STATE_UNLOAD_DONE);
1574                         break;
1575
1576                 default:
1577                         break;
1578                 }
1579         }
1580
1581         if (kind == RESET_KIND_SHUTDOWN)
1582                 tg3_ape_driver_state_change(tp, kind);
1583 }
1584
1585 /* tp->lock is held. */
1586 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1587 {
1588         if (tg3_flag(tp, ENABLE_ASF)) {
1589                 switch (kind) {
1590                 case RESET_KIND_INIT:
1591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592                                       DRV_STATE_START);
1593                         break;
1594
1595                 case RESET_KIND_SHUTDOWN:
1596                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1597                                       DRV_STATE_UNLOAD);
1598                         break;
1599
1600                 case RESET_KIND_SUSPEND:
1601                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1602                                       DRV_STATE_SUSPEND);
1603                         break;
1604
1605                 default:
1606                         break;
1607                 }
1608         }
1609 }
1610
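/* Poll for bootcode completion.  The 5906 exposes an explicit
 * VCPU_STATUS init-done bit; other chips signal completion by writing
 * the one's complement of the magic value back into the firmware
 * mailbox.  Missing firmware is reported once but is not an error.
 */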
1611 static int tg3_poll_fw(struct tg3 *tp)
1612 {
1613         int i;
1614         u32 val;
1615
1616         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1617                 /* Wait up to 20ms for init done. */
1618                 for (i = 0; i < 200; i++) {
1619                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1620                                 return 0;
1621                         udelay(100);
1622                 }
1623                 return -ENODEV;
1624         }
1625
1626         /* Wait for firmware initialization to complete. */
1627         for (i = 0; i < 100000; i++) {
1628                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1629                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1630                         break;
1631                 udelay(10);
1632         }
1633
1634         /* Chip might not be fitted with firmware.  Some Sun onboard
1635          * parts are configured like that.  So don't signal the timeout
1636          * of the above loop as an error, but do report the lack of
1637          * running firmware once.
1638          */
1639         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1640                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1641
1642                 netdev_info(tp->dev, "No firmware running\n");
1643         }
1644
1645         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1646                 /* The 57765 A0 bootcode needs a little more
1647                  * time to finish its internal initialization.
1648                  */
1649                 mdelay(10);
1650         }
1651
1652         return 0;
1653 }
1654
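/* Log the current link state (speed, duplex, flow control and, where
 * applicable, EEE) and forward it to the management firmware via the
 * UMP mailbox.
 */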
1655 static void tg3_link_report(struct tg3 *tp)
1656 {
1657         if (!netif_carrier_ok(tp->dev)) {
1658                 netif_info(tp, link, tp->dev, "Link is down\n");
1659                 tg3_ump_link_report(tp);
1660         } else if (netif_msg_link(tp)) {
1661                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1662                             (tp->link_config.active_speed == SPEED_1000 ?
1663                              1000 :
1664                              (tp->link_config.active_speed == SPEED_100 ?
1665                               100 : 10)),
1666                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1667                              "full" : "half"));
1668
1669                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1670                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1671                             "on" : "off",
1672                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1673                             "on" : "off");
1674
1675                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1676                         netdev_info(tp->dev, "EEE is %s\n",
1677                                     tp->setlpicnt ? "enabled" : "disabled");
1678
1679                 tg3_ump_link_report(tp);
1680         }
1681 }
1682
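/* Translate the driver's FLOW_CTRL_TX/RX settings into 1000BASE-X
 * pause advertisement bits (symmetric and/or asymmetric pause).
 */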
1683 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1684 {
1685         u16 miireg;
1686
1687         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1688                 miireg = ADVERTISE_1000XPAUSE;
1689         else if (flow_ctrl & FLOW_CTRL_TX)
1690                 miireg = ADVERTISE_1000XPSE_ASYM;
1691         else if (flow_ctrl & FLOW_CTRL_RX)
1692                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1693         else
1694                 miireg = 0;
1695
1696         return miireg;
1697 }
1698
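/* Resolve the flow control result for a 1000BASE-X link from the
 * local and remote pause advertisements, following the standard
 * IEEE 802.3 pause resolution rules.
 */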
1699 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1700 {
1701         u8 cap = 0;
1702
1703         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1704                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1705         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1706                 if (lcladv & ADVERTISE_1000XPAUSE)
1707                         cap = FLOW_CTRL_RX;
1708                 if (rmtadv & ADVERTISE_1000XPAUSE)
1709                         cap = FLOW_CTRL_TX;
1710         }
1711
1712         return cap;
1713 }
1714
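/* Apply the negotiated (or, with autoneg off, the configured) flow
 * control settings to the MAC by updating the RX/TX flow control
 * enable bits in the respective mode registers.
 */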
1715 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1716 {
1717         u8 autoneg;
1718         u8 flowctrl = 0;
1719         u32 old_rx_mode = tp->rx_mode;
1720         u32 old_tx_mode = tp->tx_mode;
1721
1722         if (tg3_flag(tp, USE_PHYLIB))
1723                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1724         else
1725                 autoneg = tp->link_config.autoneg;
1726
1727         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1728                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1729                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1730                 else
1731                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1732         } else
1733                 flowctrl = tp->link_config.flowctrl;
1734
1735         tp->link_config.active_flowctrl = flowctrl;
1736
1737         if (flowctrl & FLOW_CTRL_RX)
1738                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1739         else
1740                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1741
1742         if (old_rx_mode != tp->rx_mode)
1743                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1744
1745         if (flowctrl & FLOW_CTRL_TX)
1746                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1747         else
1748                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1749
1750         if (old_tx_mode != tp->tx_mode)
1751                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1752 }
1753
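/* phylib link-change callback.  Mirrors the phydev state into the
 * MAC mode, MI status and TX lengths registers and emits a link
 * report when anything relevant changed.
 */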
1754 static void tg3_adjust_link(struct net_device *dev)
1755 {
1756         u8 oldflowctrl, linkmesg = 0;
1757         u32 mac_mode, lcl_adv, rmt_adv;
1758         struct tg3 *tp = netdev_priv(dev);
1759         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1760
1761         spin_lock_bh(&tp->lock);
1762
1763         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1764                                     MAC_MODE_HALF_DUPLEX);
1765
1766         oldflowctrl = tp->link_config.active_flowctrl;
1767
1768         if (phydev->link) {
1769                 lcl_adv = 0;
1770                 rmt_adv = 0;
1771
1772                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1773                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1774                 else if (phydev->speed == SPEED_1000 ||
1775                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1776                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1777                 else
1778                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1779
1780                 if (phydev->duplex == DUPLEX_HALF)
1781                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1782                 else {
1783                         lcl_adv = mii_advertise_flowctrl(
1784                                   tp->link_config.flowctrl);
1785
1786                         if (phydev->pause)
1787                                 rmt_adv = LPA_PAUSE_CAP;
1788                         if (phydev->asym_pause)
1789                                 rmt_adv |= LPA_PAUSE_ASYM;
1790                 }
1791
1792                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1793         } else
1794                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1795
1796         if (mac_mode != tp->mac_mode) {
1797                 tp->mac_mode = mac_mode;
1798                 tw32_f(MAC_MODE, tp->mac_mode);
1799                 udelay(40);
1800         }
1801
1802         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1803                 if (phydev->speed == SPEED_10)
1804                         tw32(MAC_MI_STAT,
1805                              MAC_MI_STAT_10MBPS_MODE |
1806                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1807                 else
1808                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1809         }
1810
1811         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1812                 tw32(MAC_TX_LENGTHS,
1813                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1814                       (6 << TX_LENGTHS_IPG_SHIFT) |
1815                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1816         else
1817                 tw32(MAC_TX_LENGTHS,
1818                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1819                       (6 << TX_LENGTHS_IPG_SHIFT) |
1820                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1821
1822         if (phydev->link != tp->old_link ||
1823             phydev->speed != tp->link_config.active_speed ||
1824             phydev->duplex != tp->link_config.active_duplex ||
1825             oldflowctrl != tp->link_config.active_flowctrl)
1826                 linkmesg = 1;
1827
1828         tp->old_link = phydev->link;
1829         tp->link_config.active_speed = phydev->speed;
1830         tp->link_config.active_duplex = phydev->duplex;
1831
1832         spin_unlock_bh(&tp->lock);
1833
1834         if (linkmesg)
1835                 tg3_link_report(tp);
1836 }
1837
1838 static int tg3_phy_init(struct tg3 *tp)
1839 {
1840         struct phy_device *phydev;
1841
1842         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1843                 return 0;
1844
1845         /* Bring the PHY back to a known state. */
1846         tg3_bmcr_reset(tp);
1847
1848         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1849
1850         /* Attach the MAC to the PHY. */
1851         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1852                              phydev->dev_flags, phydev->interface);
1853         if (IS_ERR(phydev)) {
1854                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1855                 return PTR_ERR(phydev);
1856         }
1857
1858         /* Mask with MAC supported features. */
1859         switch (phydev->interface) {
1860         case PHY_INTERFACE_MODE_GMII:
1861         case PHY_INTERFACE_MODE_RGMII:
1862                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1863                         phydev->supported &= (PHY_GBIT_FEATURES |
1864                                               SUPPORTED_Pause |
1865                                               SUPPORTED_Asym_Pause);
1866                         break;
1867                 }
1868                 /* fallthru */
1869         case PHY_INTERFACE_MODE_MII:
1870                 phydev->supported &= (PHY_BASIC_FEATURES |
1871                                       SUPPORTED_Pause |
1872                                       SUPPORTED_Asym_Pause);
1873                 break;
1874         default:
1875                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1876                 return -EINVAL;
1877         }
1878
1879         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1880
1881         phydev->advertising = phydev->supported;
1882
1883         return 0;
1884 }
1885
1886 static void tg3_phy_start(struct tg3 *tp)
1887 {
1888         struct phy_device *phydev;
1889
1890         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1891                 return;
1892
1893         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1894
1895         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1896                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1897                 phydev->speed = tp->link_config.speed;
1898                 phydev->duplex = tp->link_config.duplex;
1899                 phydev->autoneg = tp->link_config.autoneg;
1900                 phydev->advertising = tp->link_config.advertising;
1901         }
1902
1903         phy_start(phydev);
1904
1905         phy_start_aneg(phydev);
1906 }
1907
1908 static void tg3_phy_stop(struct tg3 *tp)
1909 {
1910         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1911                 return;
1912
1913         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1914 }
1915
1916 static void tg3_phy_fini(struct tg3 *tp)
1917 {
1918         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1919                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1920                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1921         }
1922 }
1923
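/* Enable external loopback through the PHY's AUXCTL shadow register.
 * Uses read-modify-write except on the 5401, which cannot do it.
 */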
1924 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1925 {
1926         int err;
1927         u32 val;
1928
1929         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1930                 return 0;
1931
1932         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1933                 /* Cannot do read-modify-write on 5401 */
1934                 err = tg3_phy_auxctl_write(tp,
1935                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1936                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1937                                            0x4c20);
1938                 goto done;
1939         }
1940
1941         err = tg3_phy_auxctl_read(tp,
1942                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1943         if (err)
1944                 return err;
1945
1946         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1947         err = tg3_phy_auxctl_write(tp,
1948                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1949
1950 done:
1951         return err;
1952 }
1953
1954 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1955 {
1956         u32 phytest;
1957
1958         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1959                 u32 phy;
1960
1961                 tg3_writephy(tp, MII_TG3_FET_TEST,
1962                              phytest | MII_TG3_FET_SHADOW_EN);
1963                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1964                         if (enable)
1965                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1966                         else
1967                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1968                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1969                 }
1970                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1971         }
1972 }
1973
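/* Enable or disable the PHY's auto power-down (APD) feature through
 * the misc shadow registers; FET-style PHYs use their own shadow
 * interface instead.
 */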
1974 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1975 {
1976         u32 reg;
1977
1978         if (!tg3_flag(tp, 5705_PLUS) ||
1979             (tg3_flag(tp, 5717_PLUS) &&
1980              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1981                 return;
1982
1983         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1984                 tg3_phy_fet_toggle_apd(tp, enable);
1985                 return;
1986         }
1987
1988         reg = MII_TG3_MISC_SHDW_WREN |
1989               MII_TG3_MISC_SHDW_SCR5_SEL |
1990               MII_TG3_MISC_SHDW_SCR5_LPED |
1991               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1992               MII_TG3_MISC_SHDW_SCR5_SDTL |
1993               MII_TG3_MISC_SHDW_SCR5_C125OE;
1994         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1995                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1996
1997         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1998
1999
2000         reg = MII_TG3_MISC_SHDW_WREN |
2001               MII_TG3_MISC_SHDW_APD_SEL |
2002               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2003         if (enable)
2004                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2005
2006         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2007 }
2008
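/* Force automatic MDI/MDI-X crossover on or off.  Not applicable to
 * serdes links or to pre-5705 chips.
 */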
2009 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2010 {
2011         u32 phy;
2012
2013         if (!tg3_flag(tp, 5705_PLUS) ||
2014             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2015                 return;
2016
2017         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2018                 u32 ephy;
2019
2020                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2021                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2022
2023                         tg3_writephy(tp, MII_TG3_FET_TEST,
2024                                      ephy | MII_TG3_FET_SHADOW_EN);
2025                         if (!tg3_readphy(tp, reg, &phy)) {
2026                                 if (enable)
2027                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2028                                 else
2029                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2030                                 tg3_writephy(tp, reg, phy);
2031                         }
2032                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2033                 }
2034         } else {
2035                 int ret;
2036
2037                 ret = tg3_phy_auxctl_read(tp,
2038                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2039                 if (!ret) {
2040                         if (enable)
2041                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2042                         else
2043                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2044                         tg3_phy_auxctl_write(tp,
2045                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2046                 }
2047         }
2048 }
2049
2050 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2051 {
2052         int ret;
2053         u32 val;
2054
2055         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2056                 return;
2057
2058         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2059         if (!ret)
2060                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2061                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2062 }
2063
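/* Program the PHY DSP coefficients (AGC target, filter and offset
 * trims) from the factory OTP word cached in tp->phy_otp.
 */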
2064 static void tg3_phy_apply_otp(struct tg3 *tp)
2065 {
2066         u32 otp, phy;
2067
2068         if (!tp->phy_otp)
2069                 return;
2070
2071         otp = tp->phy_otp;
2072
2073         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2074                 return;
2075
2076         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2077         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2078         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2079
2080         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2081               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2082         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2083
2084         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2085         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2086         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2087
2088         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2089         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2090
2091         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2092         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2093
2094         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2095               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2096         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2097
2098         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2099 }
2100
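/* Re-evaluate EEE after a link change: pick the LPI exit timer for
 * the new speed and arm tp->setlpicnt when the link partner resolved
 * to an EEE-capable mode, otherwise disable LPI.
 */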
2101 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2102 {
2103         u32 val;
2104
2105         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2106                 return;
2107
2108         tp->setlpicnt = 0;
2109
2110         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2111             current_link_up == 1 &&
2112             tp->link_config.active_duplex == DUPLEX_FULL &&
2113             (tp->link_config.active_speed == SPEED_100 ||
2114              tp->link_config.active_speed == SPEED_1000)) {
2115                 u32 eeectl;
2116
2117                 if (tp->link_config.active_speed == SPEED_1000)
2118                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2119                 else
2120                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2121
2122                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2123
2124                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2125                                   TG3_CL45_D7_EEERES_STAT, &val);
2126
2127                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2128                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2129                         tp->setlpicnt = 2;
2130         }
2131
2132         if (!tp->setlpicnt) {
2133                 if (current_link_up == 1 &&
2134                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2135                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2136                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2137                 }
2138
2139                 val = tr32(TG3_CPMU_EEE_MODE);
2140                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2141         }
2142 }
2143
2144 static void tg3_phy_eee_enable(struct tg3 *tp)
2145 {
2146         u32 val;
2147
2148         if (tp->link_config.active_speed == SPEED_1000 &&
2149             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2150              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2151              tg3_flag(tp, 57765_CLASS)) &&
2152             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2153                 val = MII_TG3_DSP_TAP26_ALNOKO |
2154                       MII_TG3_DSP_TAP26_RMRXSTO;
2155                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2156                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2157         }
2158
2159         val = tr32(TG3_CPMU_EEE_MODE);
2160         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2161 }
2162
2163 static int tg3_wait_macro_done(struct tg3 *tp)
2164 {
2165         int limit = 100;
2166
2167         while (limit--) {
2168                 u32 tmp32;
2169
2170                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2171                         if ((tmp32 & 0x1000) == 0)
2172                                 break;
2173                 }
2174         }
2175         if (limit < 0)
2176                 return -EBUSY;
2177
2178         return 0;
2179 }
2180
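/* Write a known test pattern into each of the four DSP channels and
 * read it back.  A mismatch or macro timeout asks the caller to
 * reset the PHY again via *resetp.
 */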
2181 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2182 {
2183         static const u32 test_pat[4][6] = {
2184         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2185         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2186         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2187         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2188         };
2189         int chan;
2190
2191         for (chan = 0; chan < 4; chan++) {
2192                 int i;
2193
2194                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2195                              (chan * 0x2000) | 0x0200);
2196                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2197
2198                 for (i = 0; i < 6; i++)
2199                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2200                                      test_pat[chan][i]);
2201
2202                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2203                 if (tg3_wait_macro_done(tp)) {
2204                         *resetp = 1;
2205                         return -EBUSY;
2206                 }
2207
2208                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2209                              (chan * 0x2000) | 0x0200);
2210                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2211                 if (tg3_wait_macro_done(tp)) {
2212                         *resetp = 1;
2213                         return -EBUSY;
2214                 }
2215
2216                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2217                 if (tg3_wait_macro_done(tp)) {
2218                         *resetp = 1;
2219                         return -EBUSY;
2220                 }
2221
2222                 for (i = 0; i < 6; i += 2) {
2223                         u32 low, high;
2224
2225                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2226                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2227                             tg3_wait_macro_done(tp)) {
2228                                 *resetp = 1;
2229                                 return -EBUSY;
2230                         }
2231                         low &= 0x7fff;
2232                         high &= 0x000f;
2233                         if (low != test_pat[chan][i] ||
2234                             high != test_pat[chan][i+1]) {
2235                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2236                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2237                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2238
2239                                 return -EBUSY;
2240                         }
2241                 }
2242         }
2243
2244         return 0;
2245 }
2246
2247 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2248 {
2249         int chan;
2250
2251         for (chan = 0; chan < 4; chan++) {
2252                 int i;
2253
2254                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2255                              (chan * 0x2000) | 0x0200);
2256                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2257                 for (i = 0; i < 6; i++)
2258                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2259                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2260                 if (tg3_wait_macro_done(tp))
2261                         return -EBUSY;
2262         }
2263
2264         return 0;
2265 }
2266
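/* Workaround for 5703/5704/5705 PHY errata: retry a BMCR reset until
 * the DSP test patterns verify, then clear the channel memory and
 * restore the transmitter/interrupt and master-mode settings.
 */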
2267 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2268 {
2269         u32 reg32 = 0, phy9_orig = 0;  /* stay defined if every retry fails */
2270         int retries, do_phy_reset, err;
2271
2272         retries = 10;
2273         do_phy_reset = 1;
2274         do {
2275                 if (do_phy_reset) {
2276                         err = tg3_bmcr_reset(tp);
2277                         if (err)
2278                                 return err;
2279                         do_phy_reset = 0;
2280                 }
2281
2282                 /* Disable transmitter and interrupt.  */
2283                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2284                         continue;
2285
2286                 reg32 |= 0x3000;
2287                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2288
2289                 /* Set full-duplex, 1000 Mbps.  */
2290                 tg3_writephy(tp, MII_BMCR,
2291                              BMCR_FULLDPLX | BMCR_SPEED1000);
2292
2293                 /* Set to master mode.  */
2294                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2295                         continue;
2296
2297                 tg3_writephy(tp, MII_CTRL1000,
2298                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2299
2300                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2301                 if (err)
2302                         return err;
2303
2304                 /* Block the PHY control access.  */
2305                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2306
2307                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2308                 if (!err)
2309                         break;
2310         } while (--retries);
2311
2312         err = tg3_phy_reset_chanpat(tp);
2313         if (err)
2314                 return err;
2315
2316         tg3_phydsp_write(tp, 0x8005, 0x0000);
2317
2318         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2319         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2320
2321         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2322
2323         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2324
2325         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2326                 reg32 &= ~0x3000;
2327                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2328         } else if (!err)
2329                 err = -EBUSY;
2330
2331         return err;
2332 }
2333
2334 /* Unconditionally reset the tigon3 PHY and reapply the chip- and
2335  * PHY-revision-specific workarounds.
2336  */
2337 static int tg3_phy_reset(struct tg3 *tp)
2338 {
2339         u32 val, cpmuctrl;
2340         int err;
2341
2342         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2343                 val = tr32(GRC_MISC_CFG);
2344                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2345                 udelay(40);
2346         }
2347         err  = tg3_readphy(tp, MII_BMSR, &val);
2348         err |= tg3_readphy(tp, MII_BMSR, &val);
2349         if (err != 0)
2350                 return -EBUSY;
2351
2352         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2353                 netif_carrier_off(tp->dev);
2354                 tg3_link_report(tp);
2355         }
2356
2357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2358             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2360                 err = tg3_phy_reset_5703_4_5(tp);
2361                 if (err)
2362                         return err;
2363                 goto out;
2364         }
2365
2366         cpmuctrl = 0;
2367         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2368             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2369                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2370                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2371                         tw32(TG3_CPMU_CTRL,
2372                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2373         }
2374
2375         err = tg3_bmcr_reset(tp);
2376         if (err)
2377                 return err;
2378
2379         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2380                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2381                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2382
2383                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2384         }
2385
2386         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2387             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2388                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2389                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2390                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2391                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2392                         udelay(40);
2393                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2394                 }
2395         }
2396
2397         if (tg3_flag(tp, 5717_PLUS) &&
2398             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2399                 return 0;
2400
2401         tg3_phy_apply_otp(tp);
2402
2403         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2404                 tg3_phy_toggle_apd(tp, true);
2405         else
2406                 tg3_phy_toggle_apd(tp, false);
2407
2408 out:
2409         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2410             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2411                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2412                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2413                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2414         }
2415
2416         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2417                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2418                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2419         }
2420
2421         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2422                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2423                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2424                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2425                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2426                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2427                 }
2428         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2429                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2430                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2431                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2432                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2433                                 tg3_writephy(tp, MII_TG3_TEST1,
2434                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2435                         } else
2436                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2437
2438                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439                 }
2440         }
2441
2442         /* Set the extended packet length bit (bit 14) on all chips
2443          * that support jumbo frames. */
2444         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2445                 /* Cannot do read-modify-write on 5401 */
2446                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2447         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2448                 /* Set bit 14 with read-modify-write to preserve other bits */
2449                 err = tg3_phy_auxctl_read(tp,
2450                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2451                 if (!err)
2452                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2453                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2454         }
2455
2456         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2457          * transmission of jumbo frames.
2458          */
2459         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2460                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2461                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2462                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2463         }
2464
2465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2466                 /* Adjust the PHY output voltage. */
2467                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2468         }
2469
2470         tg3_phy_toggle_automdix(tp, 1);
2471         tg3_phy_set_wirespeed(tp);
2472         return 0;
2473 }
2474
2475 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2476 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2477 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2478                                           TG3_GPIO_MSG_NEED_VAUX)
2479 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2480         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2481          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2482          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2483          (TG3_GPIO_MSG_DRVR_PRES << 12))
2484
2485 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2486         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2487          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2488          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2489          (TG3_GPIO_MSG_NEED_VAUX << 12))
2490
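/* Publish this PCI function's power status (driver present / needs
 * Vaux) in the word shared by all four functions and return the
 * combined status.  5717/5719 keep the word in the APE; other chips
 * use the CPMU driver status register.
 */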
2491 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2492 {
2493         u32 status, shift;
2494
2495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2498         else
2499                 status = tr32(TG3_CPMU_DRV_STATUS);
2500
2501         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2502         status &= ~(TG3_GPIO_MSG_MASK << shift);
2503         status |= (newstat << shift);
2504
2505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2508         else
2509                 tw32(TG3_CPMU_DRV_STATUS, status);
2510
2511         return status >> TG3_APE_GPIO_MSG_SHIFT;
2512 }
2513
2514 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2515 {
2516         if (!tg3_flag(tp, IS_NIC))
2517                 return 0;
2518
2519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2520             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2522                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2523                         return -EIO;
2524
2525                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2526
2527                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2528                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2529
2530                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2531         } else {
2532                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2534         }
2535
2536         return 0;
2537 }
2538
2539 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2540 {
2541         u32 grc_local_ctrl;
2542
2543         if (!tg3_flag(tp, IS_NIC) ||
2544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2546                 return;
2547
2548         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2549
2550         tw32_wait_f(GRC_LOCAL_CTRL,
2551                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2552                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2553
2554         tw32_wait_f(GRC_LOCAL_CTRL,
2555                     grc_local_ctrl,
2556                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2557
2558         tw32_wait_f(GRC_LOCAL_CTRL,
2559                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2560                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2561 }
2562
2563 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2564 {
2565         if (!tg3_flag(tp, IS_NIC))
2566                 return;
2567
2568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2569             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2570                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2571                             (GRC_LCLCTRL_GPIO_OE0 |
2572                              GRC_LCLCTRL_GPIO_OE1 |
2573                              GRC_LCLCTRL_GPIO_OE2 |
2574                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2575                              GRC_LCLCTRL_GPIO_OUTPUT1),
2576                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2577         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2578                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2579                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2580                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2581                                      GRC_LCLCTRL_GPIO_OE1 |
2582                                      GRC_LCLCTRL_GPIO_OE2 |
2583                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2584                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2585                                      tp->grc_local_ctrl;
2586                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2587                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2588
2589                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2590                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2591                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2592
2593                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2594                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2595                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2596         } else {
2597                 u32 no_gpio2;
2598                 u32 grc_local_ctrl = 0;
2599
2600                 /* Workaround to prevent drawing too much current. */
2601                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2602                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2603                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2604                                     grc_local_ctrl,
2605                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2606                 }
2607
2608                 /* On 5753 and variants, GPIO2 cannot be used. */
2609                 no_gpio2 = tp->nic_sram_data_cfg &
2610                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2611
2612                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2613                                   GRC_LCLCTRL_GPIO_OE1 |
2614                                   GRC_LCLCTRL_GPIO_OE2 |
2615                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2616                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2617                 if (no_gpio2) {
2618                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2619                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2620                 }
2621                 tw32_wait_f(GRC_LOCAL_CTRL,
2622                             tp->grc_local_ctrl | grc_local_ctrl,
2623                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2624
2625                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2626
2627                 tw32_wait_f(GRC_LOCAL_CTRL,
2628                             tp->grc_local_ctrl | grc_local_ctrl,
2629                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2630
2631                 if (!no_gpio2) {
2632                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2633                         tw32_wait_f(GRC_LOCAL_CTRL,
2634                                     tp->grc_local_ctrl | grc_local_ctrl,
2635                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2636                 }
2637         }
2638 }
2639
2640 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2641 {
2642         u32 msg = 0;
2643
2644         /* Serialize power state transitions */
2645         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2646                 return;
2647
2648         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2649                 msg = TG3_GPIO_MSG_NEED_VAUX;
2650
2651         msg = tg3_set_function_status(tp, msg);
2652
2653         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2654                 goto done;
2655
2656         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2657                 tg3_pwrsrc_switch_to_vaux(tp);
2658         else
2659                 tg3_pwrsrc_die_with_vmain(tp);
2660
2661 done:
2662         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2663 }
2664
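/* Decide whether this device (or its peer function on dual-port
 * parts) still needs the auxiliary power source for WoL or ASF, and
 * switch the power source accordingly.
 */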
2665 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2666 {
2667         bool need_vaux = false;
2668
2669         /* The GPIOs do something completely different on 57765. */
2670         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2671                 return;
2672
2673         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2674             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2675             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2676                 tg3_frob_aux_power_5717(tp, include_wol ?
2677                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2678                 return;
2679         }
2680
2681         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2682                 struct net_device *dev_peer;
2683
2684                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2685
2686                 /* remove_one() may have been run on the peer. */
2687                 if (dev_peer) {
2688                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2689
2690                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2691                                 return;
2692
2693                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2694                             tg3_flag(tp_peer, ENABLE_ASF))
2695                                 need_vaux = true;
2696                 }
2697         }
2698
2699         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2700             tg3_flag(tp, ENABLE_ASF))
2701                 need_vaux = true;
2702
2703         if (need_vaux)
2704                 tg3_pwrsrc_switch_to_vaux(tp);
2705         else
2706                 tg3_pwrsrc_die_with_vmain(tp);
2707 }
2708
2709 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2710 {
2711         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2712                 return 1;
2713         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2714                 if (speed != SPEED_10)
2715                         return 1;
2716         } else if (speed == SPEED_10)
2717                 return 1;
2718
2719         return 0;
2720 }
2721
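/* Quiesce the PHY towards a low-power state.  Serdes parts park the
 * SG DIG block, the 5906 uses EPHY IDDQ, FET parts set the shadow
 * standby power-down bit, and everything else ends in BMCR_PDOWN
 * unless a chip erratum forbids powering the PHY down.
 */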
2722 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2723 {
2724         u32 val;
2725
2726         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2727                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2728                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2729                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2730
2731                         sg_dig_ctrl |=
2732                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2733                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2734                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2735                 }
2736                 return;
2737         }
2738
2739         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2740                 tg3_bmcr_reset(tp);
2741                 val = tr32(GRC_MISC_CFG);
2742                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2743                 udelay(40);
2744                 return;
2745         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2746                 u32 phytest;
2747                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2748                         u32 phy;
2749
2750                         tg3_writephy(tp, MII_ADVERTISE, 0);
2751                         tg3_writephy(tp, MII_BMCR,
2752                                      BMCR_ANENABLE | BMCR_ANRESTART);
2753
2754                         tg3_writephy(tp, MII_TG3_FET_TEST,
2755                                      phytest | MII_TG3_FET_SHADOW_EN);
2756                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2757                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2758                                 tg3_writephy(tp,
2759                                              MII_TG3_FET_SHDW_AUXMODE4,
2760                                              phy);
2761                         }
2762                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2763                 }
2764                 return;
2765         } else if (do_low_power) {
2766                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2767                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2768
2769                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2770                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2771                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2772                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2773         }
2774
2775         /* The PHY should not be powered down on some chips because
2776          * of bugs.
2777          */
2778         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2779             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2780             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2781              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2782             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2783              !tp->pci_fn))
2784                 return;
2785
2786         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2787             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2788                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2789                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2790                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2791                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2792         }
2793
2794         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2795 }
2796
2797 /* tp->lock is held. */
2798 static int tg3_nvram_lock(struct tg3 *tp)
2799 {
2800         if (tg3_flag(tp, NVRAM)) {
2801                 int i;
2802
2803                 if (tp->nvram_lock_cnt == 0) {
2804                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2805                         for (i = 0; i < 8000; i++) {
2806                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2807                                         break;
2808                                 udelay(20);
2809                         }
2810                         if (i == 8000) {
2811                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2812                                 return -ENODEV;
2813                         }
2814                 }
2815                 tp->nvram_lock_cnt++;
2816         }
2817         return 0;
2818 }
2819
2820 /* tp->lock is held. */
2821 static void tg3_nvram_unlock(struct tg3 *tp)
2822 {
2823         if (tg3_flag(tp, NVRAM)) {
2824                 if (tp->nvram_lock_cnt > 0)
2825                         tp->nvram_lock_cnt--;
2826                 if (tp->nvram_lock_cnt == 0)
2827                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2828         }
2829 }
2830
2831 /* tp->lock is held. */
2832 static void tg3_enable_nvram_access(struct tg3 *tp)
2833 {
2834         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2835                 u32 nvaccess = tr32(NVRAM_ACCESS);
2836
2837                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2838         }
2839 }
2840
2841 /* tp->lock is held. */
2842 static void tg3_disable_nvram_access(struct tg3 *tp)
2843 {
2844         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2845                 u32 nvaccess = tr32(NVRAM_ACCESS);
2846
2847                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2848         }
2849 }
2850
2851 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2852                                         u32 offset, u32 *val)
2853 {
2854         u32 tmp;
2855         int i;
2856
2857         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2858                 return -EINVAL;
2859
2860         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2861                                         EEPROM_ADDR_DEVID_MASK |
2862                                         EEPROM_ADDR_READ);
2863         tw32(GRC_EEPROM_ADDR,
2864              tmp |
2865              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2866              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2867               EEPROM_ADDR_ADDR_MASK) |
2868              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2869
2870         for (i = 0; i < 1000; i++) {
2871                 tmp = tr32(GRC_EEPROM_ADDR);
2872
2873                 if (tmp & EEPROM_ADDR_COMPLETE)
2874                         break;
2875                 msleep(1);
2876         }
2877         if (!(tmp & EEPROM_ADDR_COMPLETE))
2878                 return -EBUSY;
2879
2880         tmp = tr32(GRC_EEPROM_DATA);
2881
2882         /*
2883          * The data will always be opposite the native endian
2884          * format.  Perform a blind byteswap to compensate.
2885          */
2886         *val = swab32(tmp);
2887
2888         return 0;
2889 }
2890
2891 #define NVRAM_CMD_TIMEOUT 10000
2892
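/* Issue an NVRAM command and busy-wait (up to roughly 100 ms) for
 * the controller to assert NVRAM_CMD_DONE.
 */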
2893 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2894 {
2895         int i;
2896
2897         tw32(NVRAM_CMD, nvram_cmd);
2898         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2899                 udelay(10);
2900                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2901                         udelay(10);
2902                         break;
2903                 }
2904         }
2905
2906         if (i == NVRAM_CMD_TIMEOUT)
2907                 return -EBUSY;
2908
2909         return 0;
2910 }
2911
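/* Buffered Atmel flashes (AT45DB0x1B family) address by page and
 * byte offset rather than linearly; convert a linear NVRAM address
 * into that page-encoded physical form.
 */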
2912 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2913 {
2914         if (tg3_flag(tp, NVRAM) &&
2915             tg3_flag(tp, NVRAM_BUFFERED) &&
2916             tg3_flag(tp, FLASH) &&
2917             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2918             (tp->nvram_jedecnum == JEDEC_ATMEL))
2919
2920                 addr = ((addr / tp->nvram_pagesize) <<
2921                         ATMEL_AT45DB0X1B_PAGE_POS) +
2922                        (addr % tp->nvram_pagesize);
2923
2924         return addr;
2925 }
2926
2927 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2928 {
2929         if (tg3_flag(tp, NVRAM) &&
2930             tg3_flag(tp, NVRAM_BUFFERED) &&
2931             tg3_flag(tp, FLASH) &&
2932             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2933             (tp->nvram_jedecnum == JEDEC_ATMEL))
2934
2935                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2936                         tp->nvram_pagesize) +
2937                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2938
2939         return addr;
2940 }
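
/* A worked round trip through the two translations above, assuming the
 * Atmel AT45DB0x1B layout they target (264-byte pages, page number
 * starting at bit ATMEL_AT45DB0X1B_PAGE_POS == 9):
 *
 *	logical 530 = page 2, byte 2  ->  phys (2 << 9) + 2 = 0x402
 *	phys 0x402  ->  (0x402 >> 9) * 264 + (0x402 & 0x1ff) = 530
 */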
2941
2942 /* NOTE: Data read in from NVRAM is byteswapped according to
2943  * the byteswapping settings for all other register accesses.
2944  * tg3 devices are BE devices, so on a BE machine, the data
2945  * returned will be exactly as it is seen in NVRAM.  On a LE
2946  * machine, the 32-bit value will be byteswapped.
2947  */
2948 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2949 {
2950         int ret;
2951
2952         if (!tg3_flag(tp, NVRAM))
2953                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2954
2955         offset = tg3_nvram_phys_addr(tp, offset);
2956
2957         if (offset > NVRAM_ADDR_MSK)
2958                 return -EINVAL;
2959
2960         ret = tg3_nvram_lock(tp);
2961         if (ret)
2962                 return ret;
2963
2964         tg3_enable_nvram_access(tp);
2965
2966         tw32(NVRAM_ADDR, offset);
2967         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2968                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2969
2970         if (ret == 0)
2971                 *val = tr32(NVRAM_RDDATA);
2972
2973         tg3_disable_nvram_access(tp);
2974
2975         tg3_nvram_unlock(tp);
2976
2977         return ret;
2978 }
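
/* Example usage (a sketch, not new driver code): reading the signature
 * dword at byte offset 0, as the NVRAM self-test does.  tg3_nvram_read()
 * takes a byte offset and returns a cpu-order dword; callers that need
 * the raw bytestream use tg3_nvram_read_be32() below instead.
 *
 *	u32 magic;
 *
 *	if (tg3_nvram_read(tp, 0, &magic) == 0 &&
 *	    magic == TG3_EEPROM_MAGIC)
 *		... NVRAM image looks valid ...
 */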
2979
2980 /* Ensures NVRAM data is in bytestream format. */
2981 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2982 {
2983         u32 v;
2984         int res = tg3_nvram_read(tp, offset, &v);
2985         if (!res)
2986                 *val = cpu_to_be32(v);
2987         return res;
2988 }
2989
2990 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2991                                     u32 offset, u32 len, u8 *buf)
2992 {
2993         int i, j, rc = 0;
2994         u32 val;
2995
2996         for (i = 0; i < len; i += 4) {
2997                 u32 addr;
2998                 __be32 data;
2999
3000                 addr = offset + i;
3001
3002                 memcpy(&data, buf + i, 4);
3003
3004                 /*
3005                  * The SEEPROM interface expects the data to always be opposite
3006                  * the native endian format.  We accomplish this by reversing
3007                  * all the operations that would have been performed on the
3008                  * data from a call to tg3_nvram_read_be32().
3009                  */
3010                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3011
3012                 val = tr32(GRC_EEPROM_ADDR);
3013                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3014
3015                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3016                         EEPROM_ADDR_READ);
3017                 tw32(GRC_EEPROM_ADDR, val |
3018                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3019                         (addr & EEPROM_ADDR_ADDR_MASK) |
3020                         EEPROM_ADDR_START |
3021                         EEPROM_ADDR_WRITE);
3022
3023                 for (j = 0; j < 1000; j++) {
3024                         val = tr32(GRC_EEPROM_ADDR);
3025
3026                         if (val & EEPROM_ADDR_COMPLETE)
3027                                 break;
3028                         msleep(1);
3029                 }
3030                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3031                         rc = -EBUSY;
3032                         break;
3033                 }
3034         }
3035
3036         return rc;
3037 }
3038
3039 /* offset and length are dword aligned */
3040 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3041                 u8 *buf)
3042 {
3043         int ret = 0;
3044         u32 pagesize = tp->nvram_pagesize;
3045         u32 pagemask = pagesize - 1;
3046         u32 nvram_cmd;
3047         u8 *tmp;
3048
3049         tmp = kmalloc(pagesize, GFP_KERNEL);
3050         if (tmp == NULL)
3051                 return -ENOMEM;
3052
3053         while (len) {
3054                 int j;
3055                 u32 phy_addr, page_off, size;
3056
3057                 phy_addr = offset & ~pagemask;
3058
3059                 for (j = 0; j < pagesize; j += 4) {
3060                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3061                                                   (__be32 *) (tmp + j));
3062                         if (ret)
3063                                 break;
3064                 }
3065                 if (ret)
3066                         break;
3067
3068                 page_off = offset & pagemask;
3069                 size = pagesize;
3070                 if (len < size)
3071                         size = len;
3072
3073                 len -= size;
3074
3075                 memcpy(tmp + page_off, buf, size);
3076                 buf += size;    /* advance the source pointer; otherwise every page would be rewritten with the first chunk of buf */
3077                 offset = offset + (pagesize - page_off);
3078
3079                 tg3_enable_nvram_access(tp);
3080
3081                 /*
3082                  * Before we can erase the flash page, we need
3083                  * to issue a special "write enable" command.
3084                  */
3085                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3086
3087                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3088                         break;
3089
3090                 /* Erase the target page */
3091                 tw32(NVRAM_ADDR, phy_addr);
3092
3093                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3094                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3095
3096                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3097                         break;
3098
3099                 /* Issue another write enable to start the write. */
3100                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3101
3102                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3103                         break;
3104
3105                 for (j = 0; j < pagesize; j += 4) {
3106                         __be32 data;
3107
3108                         data = *((__be32 *) (tmp + j));
3109
3110                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3111
3112                         tw32(NVRAM_ADDR, phy_addr + j);
3113
3114                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3115                                 NVRAM_CMD_WR;
3116
3117                         if (j == 0)
3118                                 nvram_cmd |= NVRAM_CMD_FIRST;
3119                         else if (j == (pagesize - 4))
3120                                 nvram_cmd |= NVRAM_CMD_LAST;
3121
3122                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3123                         if (ret)
3124                                 break;
3125                 }
3126                 if (ret)
3127                         break;
3128         }
3129
3130         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3131         tg3_nvram_exec_cmd(tp, nvram_cmd);
3132
3133         kfree(tmp);
3134
3135         return ret;
3136 }
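
/* Summary of the unbuffered path above: flash without buffered writes
 * can only be programmed a full page at a time, so each pass reads the
 * target page into tmp, merges the caller's bytes over it, issues a
 * write-enable, erases the page, write-enables again, reprograms the
 * page dword by dword (FIRST on the first dword, LAST on the final
 * one), and finally drops write permission with NVRAM_CMD_WRDI.
 */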
3137
3138 /* offset and length are dword aligned */
3139 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3140                 u8 *buf)
3141 {
3142         int i, ret = 0;
3143
3144         for (i = 0; i < len; i += 4, offset += 4) {
3145                 u32 page_off, phy_addr, nvram_cmd;
3146                 __be32 data;
3147
3148                 memcpy(&data, buf + i, 4);
3149                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3150
3151                 page_off = offset % tp->nvram_pagesize;
3152
3153                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3154
3155                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3156
3157                 if (page_off == 0 || i == 0)
3158                         nvram_cmd |= NVRAM_CMD_FIRST;
3159                 if (page_off == (tp->nvram_pagesize - 4))
3160                         nvram_cmd |= NVRAM_CMD_LAST;
3161
3162                 if (i == (len - 4))
3163                         nvram_cmd |= NVRAM_CMD_LAST;
3164
3165                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3166                     !tg3_flag(tp, FLASH) ||
3167                     !tg3_flag(tp, 57765_PLUS))
3168                         tw32(NVRAM_ADDR, phy_addr);
3169
3170                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3171                     !tg3_flag(tp, 5755_PLUS) &&
3172                     (tp->nvram_jedecnum == JEDEC_ST) &&
3173                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3174                         u32 cmd;
3175
3176                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3177                         ret = tg3_nvram_exec_cmd(tp, cmd);
3178                         if (ret)
3179                                 break;
3180                 }
3181                 if (!tg3_flag(tp, FLASH)) {
3182                         /* We always do complete word writes to eeprom. */
3183                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3184                 }
3185
3186                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3187                 if (ret)
3188                         break;
3189         }
3190         return ret;
3191 }
3192
3193 /* offset and length are dword aligned */
3194 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3195 {
3196         int ret;
3197
3198         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3199                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3200                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3201                 udelay(40);
3202         }
3203
3204         if (!tg3_flag(tp, NVRAM)) {
3205                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3206         } else {
3207                 u32 grc_mode;
3208
3209                 ret = tg3_nvram_lock(tp);
3210                 if (ret)
3211                         return ret;
3212
3213                 tg3_enable_nvram_access(tp);
3214                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3215                         tw32(NVRAM_WRITE1, 0x406);
3216
3217                 grc_mode = tr32(GRC_MODE);
3218                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3219
3220                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3221                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3222                                 buf);
3223                 } else {
3224                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3225                                 buf);
3226                 }
3227
3228                 grc_mode = tr32(GRC_MODE);
3229                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3230
3231                 tg3_disable_nvram_access(tp);
3232                 tg3_nvram_unlock(tp);
3233         }
3234
3235         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3236                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3237                 udelay(40);
3238         }
3239
3240         return ret;
3241 }
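
/* Call-ordering sketch for tg3_nvram_write_block(): the EEPROM_WRITE_PROT
 * GPIO is dropped for the duration of the write and restored afterwards;
 * on NVRAM-equipped parts the write proper runs under tg3_nvram_lock(),
 * with device access and GRC_MODE_NVRAM_WR_ENABLE enabled around it, and
 * the buffered or unbuffered programmer is picked to match the flash type.
 */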
3242
3243 #define RX_CPU_SCRATCH_BASE     0x30000
3244 #define RX_CPU_SCRATCH_SIZE     0x04000
3245 #define TX_CPU_SCRATCH_BASE     0x34000
3246 #define TX_CPU_SCRATCH_SIZE     0x04000
3247
3248 /* tp->lock is held. */
3249 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3250 {
3251         int i;
3252
3253         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3254
3255         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3256                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3257
3258                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3259                 return 0;
3260         }
3261         if (offset == RX_CPU_BASE) {
3262                 for (i = 0; i < 10000; i++) {
3263                         tw32(offset + CPU_STATE, 0xffffffff);
3264                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3265                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3266                                 break;
3267                 }
3268
3269                 tw32(offset + CPU_STATE, 0xffffffff);
3270                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3271                 udelay(10);
3272         } else {
3273                 for (i = 0; i < 10000; i++) {
3274                         tw32(offset + CPU_STATE, 0xffffffff);
3275                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3276                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3277                                 break;
3278                 }
3279         }
3280
3281         if (i >= 10000) {
3282                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3283                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3284                 return -ENODEV;
3285         }
3286
3287         /* Clear firmware's nvram arbitration. */
3288         if (tg3_flag(tp, NVRAM))
3289                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3290         return 0;
3291 }
3292
3293 struct fw_info {
3294         unsigned int fw_base;
3295         unsigned int fw_len;
3296         const __be32 *fw_data;
3297 };
3298
3299 /* tp->lock is held. */
3300 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3301                                  u32 cpu_scratch_base, int cpu_scratch_size,
3302                                  struct fw_info *info)
3303 {
3304         int err, lock_err, i;
3305         void (*write_op)(struct tg3 *, u32, u32);
3306
3307         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3308                 netdev_err(tp->dev,
3309                            "%s: Trying to load TX cpu firmware on a 5705-class device\n",
3310                            __func__);
3311                 return -EINVAL;
3312         }
3313
3314         if (tg3_flag(tp, 5705_PLUS))
3315                 write_op = tg3_write_mem;
3316         else
3317                 write_op = tg3_write_indirect_reg32;
3318
3319         /* It is possible that bootcode is still loading at this point.
3320          * Get the nvram lock first before halting the cpu.
3321          */
3322         lock_err = tg3_nvram_lock(tp);
3323         err = tg3_halt_cpu(tp, cpu_base);
3324         if (!lock_err)
3325                 tg3_nvram_unlock(tp);
3326         if (err)
3327                 goto out;
3328
3329         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3330                 write_op(tp, cpu_scratch_base + i, 0);
3331         tw32(cpu_base + CPU_STATE, 0xffffffff);
3332         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3333         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3334                 write_op(tp, (cpu_scratch_base +
3335                               (info->fw_base & 0xffff) +
3336                               (i * sizeof(u32))),
3337                               be32_to_cpu(info->fw_data[i]));
3338
3339         err = 0;
3340
3341 out:
3342         return err;
3343 }
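
/* Note on the loader above: the entire scratch window is zeroed first,
 * the CPU is held in halt, and each firmware word lands at
 * cpu_scratch_base + (fw_base & 0xffff) + offset -- i.e. only the low
 * 16 bits of the advertised load address position the image inside the
 * scratch space.
 */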
3344
3345 /* tp->lock is held. */
3346 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3347 {
3348         struct fw_info info;
3349         const __be32 *fw_data;
3350         int err, i;
3351
3352         fw_data = (void *)tp->fw->data;
3353
3354         /* The firmware blob starts with three dwords: version
3355          * (fw_data[0]), start address (fw_data[1]) and length
3356          * (fw_data[2]).  The remainder is the image proper, sized
3357          * end_address_of_bss - start_address_of_text, and is loaded
3358          * contiguously from the start address. */
3359
3360         info.fw_base = be32_to_cpu(fw_data[1]);
3361         info.fw_len = tp->fw->size - 12;
3362         info.fw_data = &fw_data[3];
3363
3364         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3365                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3366                                     &info);
3367         if (err)
3368                 return err;
3369
3370         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3371                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3372                                     &info);
3373         if (err)
3374                 return err;
3375
3376         /* Now startup only the RX cpu. */
3377         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3378         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3379
3380         for (i = 0; i < 5; i++) {
3381                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3382                         break;
3383                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3384                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3385                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3386                 udelay(1000);
3387         }
3388         if (i >= 5) {
3389                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3390                            "should be %08x\n", __func__,
3391                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3392                 return -ENODEV;
3393         }
3394         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3395         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3396
3397         return 0;
3398 }
3399
3400 /* tp->lock is held. */
3401 static int tg3_load_tso_firmware(struct tg3 *tp)
3402 {
3403         struct fw_info info;
3404         const __be32 *fw_data;
3405         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3406         int err, i;
3407
3408         if (tg3_flag(tp, HW_TSO_1) ||
3409             tg3_flag(tp, HW_TSO_2) ||
3410             tg3_flag(tp, HW_TSO_3))
3411                 return 0;
3412
3413         fw_data = (void *)tp->fw->data;
3414
3415         /* The firmware blob starts with three dwords: version
3416          * (fw_data[0]), start address (fw_data[1]) and length
3417          * (fw_data[2]).  The remainder is the image proper, sized
3418          * end_address_of_bss - start_address_of_text, and is loaded
3419          * contiguously from the start address. */
3420
3421         info.fw_base = be32_to_cpu(fw_data[1]);
3422         cpu_scratch_size = tp->fw_len;
3423         info.fw_len = tp->fw->size - 12;
3424         info.fw_data = &fw_data[3];
3425
3426         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3427                 cpu_base = RX_CPU_BASE;
3428                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3429         } else {
3430                 cpu_base = TX_CPU_BASE;
3431                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3432                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3433         }
3434
3435         err = tg3_load_firmware_cpu(tp, cpu_base,
3436                                     cpu_scratch_base, cpu_scratch_size,
3437                                     &info);
3438         if (err)
3439                 return err;
3440
3441         /* Now startup the cpu. */
3442         tw32(cpu_base + CPU_STATE, 0xffffffff);
3443         tw32_f(cpu_base + CPU_PC, info.fw_base);
3444
3445         for (i = 0; i < 5; i++) {
3446                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3447                         break;
3448                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3449                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3450                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3451                 udelay(1000);
3452         }
3453         if (i >= 5) {
3454                 netdev_err(tp->dev,
3455                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3456                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3457                 return -ENODEV;
3458         }
3459         tw32(cpu_base + CPU_STATE, 0xffffffff);
3460         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3461         return 0;
3462 }
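
/* On the 5705 the TX CPU cannot be used (tg3_halt_cpu() BUG_ONs on any
 * 5705_PLUS part), so the TSO firmware above runs on the RX CPU instead,
 * with the 5705 MBUF pool standing in as its scratch area and tp->fw_len
 * as the usable size.
 */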
3463
3464
3465 /* tp->lock is held. */
3466 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3467 {
3468         u32 addr_high, addr_low;
3469         int i;
3470
3471         addr_high = ((tp->dev->dev_addr[0] << 8) |
3472                      tp->dev->dev_addr[1]);
3473         addr_low = ((tp->dev->dev_addr[2] << 24) |
3474                     (tp->dev->dev_addr[3] << 16) |
3475                     (tp->dev->dev_addr[4] <<  8) |
3476                     (tp->dev->dev_addr[5] <<  0));
3477         for (i = 0; i < 4; i++) {
3478                 if (i == 1 && skip_mac_1)
3479                         continue;
3480                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3481                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3482         }
3483
3484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3485             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3486                 for (i = 0; i < 12; i++) {
3487                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3488                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3489                 }
3490         }
3491
3492         addr_high = (tp->dev->dev_addr[0] +
3493                      tp->dev->dev_addr[1] +
3494                      tp->dev->dev_addr[2] +
3495                      tp->dev->dev_addr[3] +
3496                      tp->dev->dev_addr[4] +
3497                      tp->dev->dev_addr[5]) &
3498                 TX_BACKOFF_SEED_MASK;
3499         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3500 }
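
/* A worked example of the packing above for an arbitrary station
 * address 00:10:18:aa:bb:cc:
 *
 *	addr_high = (0x00 << 8) | 0x10                   = 0x00000010
 *	addr_low  = (0x18 << 24) | (0xaa << 16) |
 *	            (0xbb << 8)  |  0xcc                  = 0x18aabbcc
 *
 * The pair is written to all four MAC_ADDR_* slots (optionally skipping
 * slot 1), and the plain byte sum, masked by TX_BACKOFF_SEED_MASK, seeds
 * the transmit backoff generator.
 */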
3501
3502 static void tg3_enable_register_access(struct tg3 *tp)
3503 {
3504         /*
3505          * Make sure register accesses (indirect or otherwise) will function
3506          * correctly.
3507          */
3508         pci_write_config_dword(tp->pdev,
3509                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3510 }
3511
3512 static int tg3_power_up(struct tg3 *tp)
3513 {
3514         int err;
3515
3516         tg3_enable_register_access(tp);
3517
3518         err = pci_set_power_state(tp->pdev, PCI_D0);
3519         if (!err) {
3520                 /* Switch out of Vaux if it is a NIC */
3521                 tg3_pwrsrc_switch_to_vmain(tp);
3522         } else {
3523                 netdev_err(tp->dev, "Transition to D0 failed\n");
3524         }
3525
3526         return err;
3527 }
3528
3529 static int tg3_setup_phy(struct tg3 *, int);
3530
3531 static int tg3_power_down_prepare(struct tg3 *tp)
3532 {
3533         u32 misc_host_ctrl;
3534         bool device_should_wake, do_low_power;
3535
3536         tg3_enable_register_access(tp);
3537
3538         /* Restore the CLKREQ setting. */
3539         if (tg3_flag(tp, CLKREQ_BUG)) {
3540                 u16 lnkctl;
3541
3542                 pci_read_config_word(tp->pdev,
3543                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3544                                      &lnkctl);
3545                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3546                 pci_write_config_word(tp->pdev,
3547                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3548                                       lnkctl);
3549         }
3550
3551         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3552         tw32(TG3PCI_MISC_HOST_CTRL,
3553              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3554
3555         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3556                              tg3_flag(tp, WOL_ENABLE);
3557
3558         if (tg3_flag(tp, USE_PHYLIB)) {
3559                 do_low_power = false;
3560                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3561                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3562                         struct phy_device *phydev;
3563                         u32 phyid, advertising;
3564
3565                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3566
3567                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3568
3569                         tp->link_config.speed = phydev->speed;
3570                         tp->link_config.duplex = phydev->duplex;
3571                         tp->link_config.autoneg = phydev->autoneg;
3572                         tp->link_config.advertising = phydev->advertising;
3573
3574                         advertising = ADVERTISED_TP |
3575                                       ADVERTISED_Pause |
3576                                       ADVERTISED_Autoneg |
3577                                       ADVERTISED_10baseT_Half;
3578
3579                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3580                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3581                                         advertising |=
3582                                                 ADVERTISED_100baseT_Half |
3583                                                 ADVERTISED_100baseT_Full |
3584                                                 ADVERTISED_10baseT_Full;
3585                                 else
3586                                         advertising |= ADVERTISED_10baseT_Full;
3587                         }
3588
3589                         phydev->advertising = advertising;
3590
3591                         phy_start_aneg(phydev);
3592
3593                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3594                         if (phyid != PHY_ID_BCMAC131) {
3595                                 phyid &= PHY_BCM_OUI_MASK;
3596                                 if (phyid == PHY_BCM_OUI_1 ||
3597                                     phyid == PHY_BCM_OUI_2 ||
3598                                     phyid == PHY_BCM_OUI_3)
3599                                         do_low_power = true;
3600                         }
3601                 }
3602         } else {
3603                 do_low_power = true;
3604
3605                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3606                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3607
3608                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3609                         tg3_setup_phy(tp, 0);
3610         }
3611
3612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3613                 u32 val;
3614
3615                 val = tr32(GRC_VCPU_EXT_CTRL);
3616                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3617         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3618                 int i;
3619                 u32 val;
3620
3621                 for (i = 0; i < 200; i++) {
3622                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3623                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3624                                 break;
3625                         msleep(1);
3626                 }
3627         }
3628         if (tg3_flag(tp, WOL_CAP))
3629                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3630                                                      WOL_DRV_STATE_SHUTDOWN |
3631                                                      WOL_DRV_WOL |
3632                                                      WOL_SET_MAGIC_PKT);
3633
3634         if (device_should_wake) {
3635                 u32 mac_mode;
3636
3637                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3638                         if (do_low_power &&
3639                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3640                                 tg3_phy_auxctl_write(tp,
3641                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3642                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3643                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3644                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3645                                 udelay(40);
3646                         }
3647
3648                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3649                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3650                         else
3651                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3652
3653                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3654                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3655                             ASIC_REV_5700) {
3656                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3657                                              SPEED_100 : SPEED_10;
3658                                 if (tg3_5700_link_polarity(tp, speed))
3659                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3660                                 else
3661                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3662                         }
3663                 } else {
3664                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3665                 }
3666
3667                 if (!tg3_flag(tp, 5750_PLUS))
3668                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3669
3670                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3671                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3672                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3673                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3674
3675                 if (tg3_flag(tp, ENABLE_APE))
3676                         mac_mode |= MAC_MODE_APE_TX_EN |
3677                                     MAC_MODE_APE_RX_EN |
3678                                     MAC_MODE_TDE_ENABLE;
3679
3680                 tw32_f(MAC_MODE, mac_mode);
3681                 udelay(100);
3682
3683                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3684                 udelay(10);
3685         }
3686
3687         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3688             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3689              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3690                 u32 base_val;
3691
3692                 base_val = tp->pci_clock_ctrl;
3693                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3694                              CLOCK_CTRL_TXCLK_DISABLE);
3695
3696                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3697                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3698         } else if (tg3_flag(tp, 5780_CLASS) ||
3699                    tg3_flag(tp, CPMU_PRESENT) ||
3700                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3701                 /* do nothing */
3702         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3703                 u32 newbits1, newbits2;
3704
3705                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3706                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3707                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3708                                     CLOCK_CTRL_TXCLK_DISABLE |
3709                                     CLOCK_CTRL_ALTCLK);
3710                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3711                 } else if (tg3_flag(tp, 5705_PLUS)) {
3712                         newbits1 = CLOCK_CTRL_625_CORE;
3713                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3714                 } else {
3715                         newbits1 = CLOCK_CTRL_ALTCLK;
3716                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3717                 }
3718
3719                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3720                             40);
3721
3722                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3723                             40);
3724
3725                 if (!tg3_flag(tp, 5705_PLUS)) {
3726                         u32 newbits3;
3727
3728                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3729                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3730                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3731                                             CLOCK_CTRL_TXCLK_DISABLE |
3732                                             CLOCK_CTRL_44MHZ_CORE);
3733                         } else {
3734                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3735                         }
3736
3737                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3738                                     tp->pci_clock_ctrl | newbits3, 40);
3739                 }
3740         }
3741
3742         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3743                 tg3_power_down_phy(tp, do_low_power);
3744
3745         tg3_frob_aux_power(tp, true);
3746
3747         /* Workaround for unstable PLL clock */
3748         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3749             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3750                 u32 val = tr32(0x7d00);
3751
3752                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3753                 tw32(0x7d00, val);
3754                 if (!tg3_flag(tp, ENABLE_ASF)) {
3755                         int err;
3756
3757                         err = tg3_nvram_lock(tp);
3758                         tg3_halt_cpu(tp, RX_CPU_BASE);
3759                         if (!err)
3760                                 tg3_nvram_unlock(tp);
3761                 }
3762         }
3763
3764         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3765
3766         return 0;
3767 }
3768
3769 static void tg3_power_down(struct tg3 *tp)
3770 {
3771         tg3_power_down_prepare(tp);
3772
3773         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3774         pci_set_power_state(tp->pdev, PCI_D3hot);
3775 }
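
/* Suspend sequencing in the two functions above: _prepare() masks PCI
 * interrupts, restricts PHY advertising to WOL-capable speeds, arms
 * magic-packet reception via MAC_MODE, trims the clocks, and posts the
 * RESET_KIND_SHUTDOWN signature; only then does tg3_power_down() enable
 * PME and move the device to D3hot.
 */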
3776
3777 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3778 {
3779         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3780         case MII_TG3_AUX_STAT_10HALF:
3781                 *speed = SPEED_10;
3782                 *duplex = DUPLEX_HALF;
3783                 break;
3784
3785         case MII_TG3_AUX_STAT_10FULL:
3786                 *speed = SPEED_10;
3787                 *duplex = DUPLEX_FULL;
3788                 break;
3789
3790         case MII_TG3_AUX_STAT_100HALF:
3791                 *speed = SPEED_100;
3792                 *duplex = DUPLEX_HALF;
3793                 break;
3794
3795         case MII_TG3_AUX_STAT_100FULL:
3796                 *speed = SPEED_100;
3797                 *duplex = DUPLEX_FULL;
3798                 break;
3799
3800         case MII_TG3_AUX_STAT_1000HALF:
3801                 *speed = SPEED_1000;
3802                 *duplex = DUPLEX_HALF;
3803                 break;
3804
3805         case MII_TG3_AUX_STAT_1000FULL:
3806                 *speed = SPEED_1000;
3807                 *duplex = DUPLEX_FULL;
3808                 break;
3809
3810         default:
3811                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3812                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3813                                  SPEED_10;
3814                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3815                                   DUPLEX_HALF;
3816                         break;
3817                 }
3818                 *speed = SPEED_UNKNOWN;
3819                 *duplex = DUPLEX_UNKNOWN;
3820                 break;
3821         }
3822 }
3823
3824 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3825 {
3826         int err = 0;
3827         u32 val, new_adv;
3828
3829         new_adv = ADVERTISE_CSMA;
3830         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3831         new_adv |= mii_advertise_flowctrl(flowctrl);
3832
3833         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3834         if (err)
3835                 goto done;
3836
3837         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3838                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3839
3840                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3841                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3842                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3843
3844                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3845                 if (err)
3846                         goto done;
3847         }
3848
3849         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3850                 goto done;
3851
3852         tw32(TG3_CPMU_EEE_MODE,
3853              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3854
3855         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3856         if (!err) {
3857                 u32 err2;
3858
3859                 val = 0;
3860                 /* Advertise 100-BaseTX EEE ability */
3861                 if (advertise & ADVERTISED_100baseT_Full)
3862                         val |= MDIO_AN_EEE_ADV_100TX;
3863                 /* Advertise 1000-BaseT EEE ability */
3864                 if (advertise & ADVERTISED_1000baseT_Full)
3865                         val |= MDIO_AN_EEE_ADV_1000T;
3866                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3867                 if (err)
3868                         val = 0;
3869
3870                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3871                 case ASIC_REV_5717:
3872                 case ASIC_REV_57765:
3873                 case ASIC_REV_57766:
3874                 case ASIC_REV_5719:
3875                         /* If any EEE modes were advertised above... */
3876                         if (val)
3877                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3878                                       MII_TG3_DSP_TAP26_RMRXSTO |
3879                                       MII_TG3_DSP_TAP26_OPCSINPT;
3880                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3881                         /* Fall through */
3882                 case ASIC_REV_5720:
3883                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3884                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3885                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3886                 }
3887
3888                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3889                 if (!err)
3890                         err = err2;
3891         }
3892
3893 done:
3894         return err;
3895 }
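
/* For reference, the flow-control bits folded into the advertisement
 * above follow the usual mii.h encoding (assuming the standard
 * mii_advertise_flowctrl() semantics):
 *
 *	FLOW_CTRL_TX | FLOW_CTRL_RX -> ADVERTISE_PAUSE_CAP
 *	FLOW_CTRL_RX alone          -> ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM
 *	FLOW_CTRL_TX alone          -> ADVERTISE_PAUSE_ASYM
 */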
3896
3897 static void tg3_phy_copper_begin(struct tg3 *tp)
3898 {
3899         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3900             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3901                 u32 adv, fc;
3902
3903                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3904                         adv = ADVERTISED_10baseT_Half |
3905                               ADVERTISED_10baseT_Full;
3906                         if (tg3_flag(tp, WOL_SPEED_100MB))
3907                                 adv |= ADVERTISED_100baseT_Half |
3908                                        ADVERTISED_100baseT_Full;
3909
3910                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3911                 } else {
3912                         adv = tp->link_config.advertising;
3913                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3914                                 adv &= ~(ADVERTISED_1000baseT_Half |
3915                                          ADVERTISED_1000baseT_Full);
3916
3917                         fc = tp->link_config.flowctrl;
3918                 }
3919
3920                 tg3_phy_autoneg_cfg(tp, adv, fc);
3921
3922                 tg3_writephy(tp, MII_BMCR,
3923                              BMCR_ANENABLE | BMCR_ANRESTART);
3924         } else {
3925                 int i;
3926                 u32 bmcr, orig_bmcr;
3927
3928                 tp->link_config.active_speed = tp->link_config.speed;
3929                 tp->link_config.active_duplex = tp->link_config.duplex;
3930
3931                 bmcr = 0;
3932                 switch (tp->link_config.speed) {
3933                 default:
3934                 case SPEED_10:
3935                         break;
3936
3937                 case SPEED_100:
3938                         bmcr |= BMCR_SPEED100;
3939                         break;
3940
3941                 case SPEED_1000:
3942                         bmcr |= BMCR_SPEED1000;
3943                         break;
3944                 }
3945
3946                 if (tp->link_config.duplex == DUPLEX_FULL)
3947                         bmcr |= BMCR_FULLDPLX;
3948
3949                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3950                     (bmcr != orig_bmcr)) {
3951                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3952                         for (i = 0; i < 1500; i++) {
3953                                 u32 tmp;
3954
3955                                 udelay(10);
3956                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3957                                     tg3_readphy(tp, MII_BMSR, &tmp))
3958                                         continue;
3959                                 if (!(tmp & BMSR_LSTATUS)) {
3960                                         udelay(40);
3961                                         break;
3962                                 }
3963                         }
3964                         tg3_writephy(tp, MII_BMCR, bmcr);
3965                         udelay(40);
3966                 }
3967         }
3968 }
3969
3970 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3971 {
3972         int err;
3973
3974         /* Turn off tap power management and set the
3975          * extended packet length bit. */
3976         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3977
3978         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3979         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3980         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3981         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3982         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3983
3984         udelay(40);
3985
3986         return err;
3987 }
3988
3989 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3990 {
3991         u32 advmsk, tgtadv, advertising;
3992
3993         advertising = tp->link_config.advertising;
3994         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3995
3996         advmsk = ADVERTISE_ALL;
3997         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3998                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3999                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4000         }
4001
4002         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4003                 return false;
4004
4005         if ((*lcladv & advmsk) != tgtadv)
4006                 return false;
4007
4008         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4009                 u32 tg3_ctrl;
4010
4011                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4012
4013                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4014                         return false;
4015
4016                 if (tgtadv &&
4017                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4018                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4019                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4020                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4021                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4022                 } else {
4023                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4024                 }
4025
4026                 if (tg3_ctrl != tgtadv)
4027                         return false;
4028         }
4029
4030         return true;
4031 }
4032
4033 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4034 {
4035         u32 lpeth = 0;
4036
4037         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4038                 u32 val;
4039
4040                 if (tg3_readphy(tp, MII_STAT1000, &val))
4041                         return false;
4042
4043                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4044         }
4045
4046         if (tg3_readphy(tp, MII_LPA, rmtadv))
4047                 return false;
4048
4049         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4050         tp->link_config.rmt_adv = lpeth;
4051
4052         return true;
4053 }
4054
4055 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4056 {
4057         int current_link_up;
4058         u32 bmsr, val;
4059         u32 lcl_adv, rmt_adv;
4060         u16 current_speed;
4061         u8 current_duplex;
4062         int i, err;
4063
4064         tw32(MAC_EVENT, 0);
4065
4066         tw32_f(MAC_STATUS,
4067              (MAC_STATUS_SYNC_CHANGED |
4068               MAC_STATUS_CFG_CHANGED |
4069               MAC_STATUS_MI_COMPLETION |
4070               MAC_STATUS_LNKSTATE_CHANGED));
4071         udelay(40);
4072
4073         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4074                 tw32_f(MAC_MI_MODE,
4075                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4076                 udelay(80);
4077         }
4078
4079         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4080
4081         /* Some third-party PHYs need to be reset on link going
4082          * down.
4083          */
4084         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4085              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4086              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4087             netif_carrier_ok(tp->dev)) {
4088                 tg3_readphy(tp, MII_BMSR, &bmsr);
4089                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4090                     !(bmsr & BMSR_LSTATUS))
4091                         force_reset = 1;
4092         }
4093         if (force_reset)
4094                 tg3_phy_reset(tp);
4095
4096         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4097                 tg3_readphy(tp, MII_BMSR, &bmsr);
4098                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4099                     !tg3_flag(tp, INIT_COMPLETE))
4100                         bmsr = 0;
4101
4102                 if (!(bmsr & BMSR_LSTATUS)) {
4103                         err = tg3_init_5401phy_dsp(tp);
4104                         if (err)
4105                                 return err;
4106
4107                         tg3_readphy(tp, MII_BMSR, &bmsr);
4108                         for (i = 0; i < 1000; i++) {
4109                                 udelay(10);
4110                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4111                                     (bmsr & BMSR_LSTATUS)) {
4112                                         udelay(40);
4113                                         break;
4114                                 }
4115                         }
4116
4117                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4118                             TG3_PHY_REV_BCM5401_B0 &&
4119                             !(bmsr & BMSR_LSTATUS) &&
4120                             tp->link_config.active_speed == SPEED_1000) {
4121                                 err = tg3_phy_reset(tp);
4122                                 if (!err)
4123                                         err = tg3_init_5401phy_dsp(tp);
4124                                 if (err)
4125                                         return err;
4126                         }
4127                 }
4128         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4129                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4130                 /* 5701 {A0,B0} CRC bug workaround */
4131                 tg3_writephy(tp, 0x15, 0x0a75);
4132                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4133                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4134                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4135         }
4136
4137         /* Clear pending interrupts... */
4138         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4139         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4140
4141         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4142                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4143         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4144                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4145
4146         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4147             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4148                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4149                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4150                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4151                 else
4152                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4153         }
4154
4155         current_link_up = 0;
4156         current_speed = SPEED_UNKNOWN;
4157         current_duplex = DUPLEX_UNKNOWN;
4158         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4159         tp->link_config.rmt_adv = 0;
4160
4161         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4162                 err = tg3_phy_auxctl_read(tp,
4163                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4164                                           &val);
4165                 if (!err && !(val & (1 << 10))) {
4166                         tg3_phy_auxctl_write(tp,
4167                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4168                                              val | (1 << 10));
4169                         goto relink;
4170                 }
4171         }
4172
4173         bmsr = 0;
4174         for (i = 0; i < 100; i++) {
4175                 tg3_readphy(tp, MII_BMSR, &bmsr);
4176                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4177                     (bmsr & BMSR_LSTATUS))
4178                         break;
4179                 udelay(40);
4180         }
4181
4182         if (bmsr & BMSR_LSTATUS) {
4183                 u32 aux_stat, bmcr;
4184
4185                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4186                 for (i = 0; i < 2000; i++) {
4187                         udelay(10);
4188                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4189                             aux_stat)
4190                                 break;
4191                 }
4192
4193                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4194                                              &current_speed,
4195                                              &current_duplex);
4196
4197                 bmcr = 0;
4198                 for (i = 0; i < 200; i++) {
4199                         tg3_readphy(tp, MII_BMCR, &bmcr);
4200                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4201                                 continue;
4202                         if (bmcr && bmcr != 0x7fff)
4203                                 break;
4204                         udelay(10);
4205                 }
4206
4207                 lcl_adv = 0;
4208                 rmt_adv = 0;
4209
4210                 tp->link_config.active_speed = current_speed;
4211                 tp->link_config.active_duplex = current_duplex;
4212
4213                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4214                         if ((bmcr & BMCR_ANENABLE) &&
4215                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4216                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4217                                 current_link_up = 1;
4218                 } else {
4219                         if (!(bmcr & BMCR_ANENABLE) &&
4220                             tp->link_config.speed == current_speed &&
4221                             tp->link_config.duplex == current_duplex &&
4222                             tp->link_config.flowctrl ==
4223                             tp->link_config.active_flowctrl) {
4224                                 current_link_up = 1;
4225                         }
4226                 }
4227
4228                 if (current_link_up == 1 &&
4229                     tp->link_config.active_duplex == DUPLEX_FULL) {
4230                         u32 reg, bit;
4231
4232                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4233                                 reg = MII_TG3_FET_GEN_STAT;
4234                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4235                         } else {
4236                                 reg = MII_TG3_EXT_STAT;
4237                                 bit = MII_TG3_EXT_STAT_MDIX;
4238                         }
4239
4240                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4241                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4242
4243                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4244                 }
4245         }
4246
4247 relink:
4248         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4249                 tg3_phy_copper_begin(tp);
4250
4251                 tg3_readphy(tp, MII_BMSR, &bmsr);
4252                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4253                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4254                         current_link_up = 1;
4255         }
4256
4257         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4258         if (current_link_up == 1) {
4259                 if (tp->link_config.active_speed == SPEED_100 ||
4260                     tp->link_config.active_speed == SPEED_10)
4261                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4262                 else
4263                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4264         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4265                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4266         else
4267                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4268
4269         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4270         if (tp->link_config.active_duplex == DUPLEX_HALF)
4271                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4272
4273         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4274                 if (current_link_up == 1 &&
4275                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4276                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4277                 else
4278                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4279         }
4280
4281         /* ??? Without this setting Netgear GA302T PHY does not
4282          * ??? send/receive packets...
4283          */
4284         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4285             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4286                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4287                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4288                 udelay(80);
4289         }
4290
4291         tw32_f(MAC_MODE, tp->mac_mode);
4292         udelay(40);
4293
4294         tg3_phy_eee_adjust(tp, current_link_up);
4295
4296         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4297                 /* Polled via timer. */
4298                 tw32_f(MAC_EVENT, 0);
4299         } else {
4300                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4301         }
4302         udelay(40);
4303
4304         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4305             current_link_up == 1 &&
4306             tp->link_config.active_speed == SPEED_1000 &&
4307             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4308                 udelay(120);
4309                 tw32_f(MAC_STATUS,
4310                      (MAC_STATUS_SYNC_CHANGED |
4311                       MAC_STATUS_CFG_CHANGED));
4312                 udelay(40);
4313                 tg3_write_mem(tp,
4314                               NIC_SRAM_FIRMWARE_MBOX,
4315                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4316         }
4317
4318         /* Prevent send BD corruption. */
4319         if (tg3_flag(tp, CLKREQ_BUG)) {
4320                 u16 oldlnkctl, newlnkctl;
4321
4322                 pci_read_config_word(tp->pdev,
4323                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4324                                      &oldlnkctl);
4325                 if (tp->link_config.active_speed == SPEED_100 ||
4326                     tp->link_config.active_speed == SPEED_10)
4327                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4328                 else
4329                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4330                 if (newlnkctl != oldlnkctl)
4331                         pci_write_config_word(tp->pdev,
4332                                               pci_pcie_cap(tp->pdev) +
4333                                               PCI_EXP_LNKCTL, newlnkctl);
4334         }
4335
4336         if (current_link_up != netif_carrier_ok(tp->dev)) {
4337                 if (current_link_up)
4338                         netif_carrier_on(tp->dev);
4339                 else
4340                         netif_carrier_off(tp->dev);
4341                 tg3_link_report(tp);
4342         }
4343
4344         return 0;
4345 }
4346
4347 struct tg3_fiber_aneginfo {
4348         int state;
4349 #define ANEG_STATE_UNKNOWN              0
4350 #define ANEG_STATE_AN_ENABLE            1
4351 #define ANEG_STATE_RESTART_INIT         2
4352 #define ANEG_STATE_RESTART              3
4353 #define ANEG_STATE_DISABLE_LINK_OK      4
4354 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4355 #define ANEG_STATE_ABILITY_DETECT       6
4356 #define ANEG_STATE_ACK_DETECT_INIT      7
4357 #define ANEG_STATE_ACK_DETECT           8
4358 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4359 #define ANEG_STATE_COMPLETE_ACK         10
4360 #define ANEG_STATE_IDLE_DETECT_INIT     11
4361 #define ANEG_STATE_IDLE_DETECT          12
4362 #define ANEG_STATE_LINK_OK              13
4363 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4364 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4365
4366         u32 flags;
4367 #define MR_AN_ENABLE            0x00000001
4368 #define MR_RESTART_AN           0x00000002
4369 #define MR_AN_COMPLETE          0x00000004
4370 #define MR_PAGE_RX              0x00000008
4371 #define MR_NP_LOADED            0x00000010
4372 #define MR_TOGGLE_TX            0x00000020
4373 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4374 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4375 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4376 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4377 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4378 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4379 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4380 #define MR_TOGGLE_RX            0x00002000
4381 #define MR_NP_RX                0x00004000
4382
4383 #define MR_LINK_OK              0x80000000
4384
4385         unsigned long link_time, cur_time;
4386
4387         u32 ability_match_cfg;
4388         int ability_match_count;
4389
4390         char ability_match, idle_match, ack_match;
4391
4392         u32 txconfig, rxconfig;
4393 #define ANEG_CFG_NP             0x00000080
4394 #define ANEG_CFG_ACK            0x00000040
4395 #define ANEG_CFG_RF2            0x00000020
4396 #define ANEG_CFG_RF1            0x00000010
4397 #define ANEG_CFG_PS2            0x00000001
4398 #define ANEG_CFG_PS1            0x00008000
4399 #define ANEG_CFG_HD             0x00004000
4400 #define ANEG_CFG_FD             0x00002000
4401 #define ANEG_CFG_INVAL          0x00001f06
4402
4403 };
4404 #define ANEG_OK         0
4405 #define ANEG_DONE       1
4406 #define ANEG_TIMER_ENAB 2
4407 #define ANEG_FAILED     -1
4408
4409 #define ANEG_STATE_SETTLE_TIME  10000
4410
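     /* Software 1000Base-X autoneg state machine.  fiber_autoneg() ticks
      * it roughly once per microsecond, so ANEG_STATE_SETTLE_TIME (10000
      * ticks) is about a 10 ms settle window.  ANEG_OK/ANEG_TIMER_ENAB
      * mean "keep ticking"; ANEG_DONE and ANEG_FAILED are terminal.
      */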
4411 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4412                                    struct tg3_fiber_aneginfo *ap)
4413 {
4414         u16 flowctrl;
4415         unsigned long delta;
4416         u32 rx_cfg_reg;
4417         int ret;
4418
4419         if (ap->state == ANEG_STATE_UNKNOWN) {
4420                 ap->rxconfig = 0;
4421                 ap->link_time = 0;
4422                 ap->cur_time = 0;
4423                 ap->ability_match_cfg = 0;
4424                 ap->ability_match_count = 0;
4425                 ap->ability_match = 0;
4426                 ap->idle_match = 0;
4427                 ap->ack_match = 0;
4428         }
4429         ap->cur_time++;
4430
4431         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4432                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4433
4434                 if (rx_cfg_reg != ap->ability_match_cfg) {
4435                         ap->ability_match_cfg = rx_cfg_reg;
4436                         ap->ability_match = 0;
4437                         ap->ability_match_count = 0;
4438                 } else {
4439                         if (++ap->ability_match_count > 1) {
4440                                 ap->ability_match = 1;
4441                                 ap->ability_match_cfg = rx_cfg_reg;
4442                         }
4443                 }
4444                 if (rx_cfg_reg & ANEG_CFG_ACK)
4445                         ap->ack_match = 1;
4446                 else
4447                         ap->ack_match = 0;
4448
4449                 ap->idle_match = 0;
4450         } else {
4451                 ap->idle_match = 1;
4452                 ap->ability_match_cfg = 0;
4453                 ap->ability_match_count = 0;
4454                 ap->ability_match = 0;
4455                 ap->ack_match = 0;
4456
4457                 rx_cfg_reg = 0;
4458         }
4459
4460         ap->rxconfig = rx_cfg_reg;
4461         ret = ANEG_OK;
4462
4463         switch (ap->state) {
4464         case ANEG_STATE_UNKNOWN:
4465                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4466                         ap->state = ANEG_STATE_AN_ENABLE;
4467
4468                 /* fallthru */
4469         case ANEG_STATE_AN_ENABLE:
4470                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4471                 if (ap->flags & MR_AN_ENABLE) {
4472                         ap->link_time = 0;
4473                         ap->cur_time = 0;
4474                         ap->ability_match_cfg = 0;
4475                         ap->ability_match_count = 0;
4476                         ap->ability_match = 0;
4477                         ap->idle_match = 0;
4478                         ap->ack_match = 0;
4479
4480                         ap->state = ANEG_STATE_RESTART_INIT;
4481                 } else {
4482                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4483                 }
4484                 break;
4485
4486         case ANEG_STATE_RESTART_INIT:
4487                 ap->link_time = ap->cur_time;
4488                 ap->flags &= ~(MR_NP_LOADED);
4489                 ap->txconfig = 0;
4490                 tw32(MAC_TX_AUTO_NEG, 0);
4491                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4492                 tw32_f(MAC_MODE, tp->mac_mode);
4493                 udelay(40);
4494
4495                 ret = ANEG_TIMER_ENAB;
4496                 ap->state = ANEG_STATE_RESTART;
4497
4498                 /* fallthru */
4499         case ANEG_STATE_RESTART:
4500                 delta = ap->cur_time - ap->link_time;
4501                 if (delta > ANEG_STATE_SETTLE_TIME)
4502                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4503                 else
4504                         ret = ANEG_TIMER_ENAB;
4505                 break;
4506
4507         case ANEG_STATE_DISABLE_LINK_OK:
4508                 ret = ANEG_DONE;
4509                 break;
4510
4511         case ANEG_STATE_ABILITY_DETECT_INIT:
4512                 ap->flags &= ~(MR_TOGGLE_TX);
4513                 ap->txconfig = ANEG_CFG_FD;
4514                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4515                 if (flowctrl & ADVERTISE_1000XPAUSE)
4516                         ap->txconfig |= ANEG_CFG_PS1;
4517                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4518                         ap->txconfig |= ANEG_CFG_PS2;
4519                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4520                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4521                 tw32_f(MAC_MODE, tp->mac_mode);
4522                 udelay(40);
4523
4524                 ap->state = ANEG_STATE_ABILITY_DETECT;
4525                 break;
4526
4527         case ANEG_STATE_ABILITY_DETECT:
4528                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4529                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4530                 break;
4531
4532         case ANEG_STATE_ACK_DETECT_INIT:
4533                 ap->txconfig |= ANEG_CFG_ACK;
4534                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4535                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4536                 tw32_f(MAC_MODE, tp->mac_mode);
4537                 udelay(40);
4538
4539                 ap->state = ANEG_STATE_ACK_DETECT;
4540
4541                 /* fallthru */
4542         case ANEG_STATE_ACK_DETECT:
4543                 if (ap->ack_match != 0) {
4544                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4545                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4546                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4547                         } else {
4548                                 ap->state = ANEG_STATE_AN_ENABLE;
4549                         }
4550                 } else if (ap->ability_match != 0 &&
4551                            ap->rxconfig == 0) {
4552                         ap->state = ANEG_STATE_AN_ENABLE;
4553                 }
4554                 break;
4555
4556         case ANEG_STATE_COMPLETE_ACK_INIT:
4557                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4558                         ret = ANEG_FAILED;
4559                         break;
4560                 }
4561                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4562                                MR_LP_ADV_HALF_DUPLEX |
4563                                MR_LP_ADV_SYM_PAUSE |
4564                                MR_LP_ADV_ASYM_PAUSE |
4565                                MR_LP_ADV_REMOTE_FAULT1 |
4566                                MR_LP_ADV_REMOTE_FAULT2 |
4567                                MR_LP_ADV_NEXT_PAGE |
4568                                MR_TOGGLE_RX |
4569                                MR_NP_RX);
4570                 if (ap->rxconfig & ANEG_CFG_FD)
4571                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4572                 if (ap->rxconfig & ANEG_CFG_HD)
4573                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4574                 if (ap->rxconfig & ANEG_CFG_PS1)
4575                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4576                 if (ap->rxconfig & ANEG_CFG_PS2)
4577                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4578                 if (ap->rxconfig & ANEG_CFG_RF1)
4579                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4580                 if (ap->rxconfig & ANEG_CFG_RF2)
4581                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4582                 if (ap->rxconfig & ANEG_CFG_NP)
4583                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4584
4585                 ap->link_time = ap->cur_time;
4586
4587                 ap->flags ^= (MR_TOGGLE_TX);
4588                 if (ap->rxconfig & 0x0008)
4589                         ap->flags |= MR_TOGGLE_RX;
4590                 if (ap->rxconfig & ANEG_CFG_NP)
4591                         ap->flags |= MR_NP_RX;
4592                 ap->flags |= MR_PAGE_RX;
4593
4594                 ap->state = ANEG_STATE_COMPLETE_ACK;
4595                 ret = ANEG_TIMER_ENAB;
4596                 break;
4597
4598         case ANEG_STATE_COMPLETE_ACK:
4599                 if (ap->ability_match != 0 &&
4600                     ap->rxconfig == 0) {
4601                         ap->state = ANEG_STATE_AN_ENABLE;
4602                         break;
4603                 }
4604                 delta = ap->cur_time - ap->link_time;
4605                 if (delta > ANEG_STATE_SETTLE_TIME) {
4606                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4607                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4608                         } else {
4609                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4610                                     !(ap->flags & MR_NP_RX)) {
4611                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4612                                 } else {
4613                                         ret = ANEG_FAILED;
4614                                 }
4615                         }
4616                 }
4617                 break;
4618
4619         case ANEG_STATE_IDLE_DETECT_INIT:
4620                 ap->link_time = ap->cur_time;
4621                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4622                 tw32_f(MAC_MODE, tp->mac_mode);
4623                 udelay(40);
4624
4625                 ap->state = ANEG_STATE_IDLE_DETECT;
4626                 ret = ANEG_TIMER_ENAB;
4627                 break;
4628
4629         case ANEG_STATE_IDLE_DETECT:
4630                 if (ap->ability_match != 0 &&
4631                     ap->rxconfig == 0) {
4632                         ap->state = ANEG_STATE_AN_ENABLE;
4633                         break;
4634                 }
4635                 delta = ap->cur_time - ap->link_time;
4636                 if (delta > ANEG_STATE_SETTLE_TIME) {
4637                         /* XXX another gem from the Broadcom driver :( */
4638                         ap->state = ANEG_STATE_LINK_OK;
4639                 }
4640                 break;
4641
4642         case ANEG_STATE_LINK_OK:
4643                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4644                 ret = ANEG_DONE;
4645                 break;
4646
4647         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4648                 /* ??? unimplemented */
4649                 break;
4650
4651         case ANEG_STATE_NEXT_PAGE_WAIT:
4652                 /* ??? unimplemented */
4653                 break;
4654
4655         default:
4656                 ret = ANEG_FAILED;
4657                 break;
4658         }
4659
4660         return ret;
4661 }
4662
4663 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4664 {
4665         int res = 0;
4666         struct tg3_fiber_aneginfo aninfo;
4667         int status = ANEG_FAILED;
4668         unsigned int tick;
4669         u32 tmp;
4670
4671         tw32_f(MAC_TX_AUTO_NEG, 0);
4672
4673         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4674         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4675         udelay(40);
4676
4677         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4678         udelay(40);
4679
4680         memset(&aninfo, 0, sizeof(aninfo));
4681         aninfo.flags |= MR_AN_ENABLE;
4682         aninfo.state = ANEG_STATE_UNKNOWN;
4683         aninfo.cur_time = 0;
4684         tick = 0;
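             /* Poll the software state machine for up to ~195 ms, or
              * until it reports completion or failure.
              */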
4685         while (++tick < 195000) {
4686                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4687                 if (status == ANEG_DONE || status == ANEG_FAILED)
4688                         break;
4689
4690                 udelay(1);
4691         }
4692
4693         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4694         tw32_f(MAC_MODE, tp->mac_mode);
4695         udelay(40);
4696
4697         *txflags = aninfo.txconfig;
4698         *rxflags = aninfo.flags;
4699
4700         if (status == ANEG_DONE &&
4701             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4702                              MR_LP_ADV_FULL_DUPLEX)))
4703                 res = 1;
4704
4705         return res;
4706 }
4707
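     /* Bring the BCM8002 SerDes PHY up using its vendor-specific
      * register sequence (registers 0x10-0x1f are vendor-defined in
      * MII, so the writes below are BCM8002-specific).
      */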
4708 static void tg3_init_bcm8002(struct tg3 *tp)
4709 {
4710         u32 mac_status = tr32(MAC_STATUS);
4711         int i;
4712
4713         /* Reset when initializing for the first time or when we have a link. */
4714         if (tg3_flag(tp, INIT_COMPLETE) &&
4715             !(mac_status & MAC_STATUS_PCS_SYNCED))
4716                 return;
4717
4718         /* Set PLL lock range. */
4719         tg3_writephy(tp, 0x16, 0x8007);
4720
4721         /* SW reset */
4722         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4723
4724         /* Wait for reset to complete. */
4725         /* XXX schedule_timeout() ... */
4726         for (i = 0; i < 500; i++)
4727                 udelay(10);
4728
4729         /* Config mode; select PMA/Ch 1 regs. */
4730         tg3_writephy(tp, 0x10, 0x8411);
4731
4732         /* Enable auto-lock and comdet, select txclk for tx. */
4733         tg3_writephy(tp, 0x11, 0x0a10);
4734
4735         tg3_writephy(tp, 0x18, 0x00a0);
4736         tg3_writephy(tp, 0x16, 0x41ff);
4737
4738         /* Assert and deassert POR. */
4739         tg3_writephy(tp, 0x13, 0x0400);
4740         udelay(40);
4741         tg3_writephy(tp, 0x13, 0x0000);
4742
4743         tg3_writephy(tp, 0x11, 0x0a50);
4744         udelay(40);
4745         tg3_writephy(tp, 0x11, 0x0a10);
4746
4747         /* Wait for signal to stabilize */
4748         /* XXX schedule_timeout() ... */
4749         for (i = 0; i < 15000; i++)
4750                 udelay(10);
4751
4752         /* Deselect the channel register so we can read the PHYID
4753          * later.
4754          */
4755         tg3_writephy(tp, 0x10, 0x8011);
4756 }
4757
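     /* Fiber link bring-up via the SG_DIG hardware autoneg block.
      * Returns nonzero when the link should be considered up.
      */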
4758 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4759 {
4760         u16 flowctrl;
4761         u32 sg_dig_ctrl, sg_dig_status;
4762         u32 serdes_cfg, expected_sg_dig_ctrl;
4763         int workaround, port_a;
4764         int current_link_up;
4765
4766         serdes_cfg = 0;
4767         expected_sg_dig_ctrl = 0;
4768         workaround = 0;
4769         port_a = 1;
4770         current_link_up = 0;
4771
4772         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4773             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4774                 workaround = 1;
4775                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4776                         port_a = 0;
4777
4778                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4779                 /* preserve bits 20-23 for voltage regulator */
4780                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4781         }
4782
4783         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4784
4785         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4786                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4787                         if (workaround) {
4788                                 u32 val = serdes_cfg;
4789
4790                                 if (port_a)
4791                                         val |= 0xc010000;
4792                                 else
4793                                         val |= 0x4010000;
4794                                 tw32_f(MAC_SERDES_CFG, val);
4795                         }
4796
4797                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4798                 }
4799                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4800                         tg3_setup_flow_control(tp, 0, 0);
4801                         current_link_up = 1;
4802                 }
4803                 goto out;
4804         }
4805
4806         /* Want auto-negotiation.  */
4807         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4808
4809         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4810         if (flowctrl & ADVERTISE_1000XPAUSE)
4811                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4812         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4813                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4814
4815         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4816                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4817                     tp->serdes_counter &&
4818                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4819                                     MAC_STATUS_RCVD_CFG)) ==
4820                      MAC_STATUS_PCS_SYNCED)) {
4821                         tp->serdes_counter--;
4822                         current_link_up = 1;
4823                         goto out;
4824                 }
4825 restart_autoneg:
4826                 if (workaround)
4827                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4828                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4829                 udelay(5);
4830                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4831
4832                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4833                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4834         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4835                                  MAC_STATUS_SIGNAL_DET)) {
4836                 sg_dig_status = tr32(SG_DIG_STATUS);
4837                 mac_status = tr32(MAC_STATUS);
4838
4839                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4840                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4841                         u32 local_adv = 0, remote_adv = 0;
4842
4843                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4844                                 local_adv |= ADVERTISE_1000XPAUSE;
4845                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4846                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4847
4848                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4849                                 remote_adv |= LPA_1000XPAUSE;
4850                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4851                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4852
4853                         tp->link_config.rmt_adv =
4854                                            mii_adv_to_ethtool_adv_x(remote_adv);
4855
4856                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4857                         current_link_up = 1;
4858                         tp->serdes_counter = 0;
4859                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4860                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4861                         if (tp->serdes_counter)
4862                                 tp->serdes_counter--;
4863                         else {
4864                                 if (workaround) {
4865                                         u32 val = serdes_cfg;
4866
4867                                         if (port_a)
4868                                                 val |= 0xc010000;
4869                                         else
4870                                                 val |= 0x4010000;
4871
4872                                         tw32_f(MAC_SERDES_CFG, val);
4873                                 }
4874
4875                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4876                                 udelay(40);
4877
4878                                 /* Link parallel detection: link is up
4879                                  * only if we have PCS_SYNC and are not
4880                                  * receiving config code words.  */
4881                                 mac_status = tr32(MAC_STATUS);
4882                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4883                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4884                                         tg3_setup_flow_control(tp, 0, 0);
4885                                         current_link_up = 1;
4886                                         tp->phy_flags |=
4887                                                 TG3_PHYFLG_PARALLEL_DETECT;
4888                                         tp->serdes_counter =
4889                                                 SERDES_PARALLEL_DET_TIMEOUT;
4890                                 } else
4891                                         goto restart_autoneg;
4892                         }
4893                 }
4894         } else {
4895                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4896                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4897         }
4898
4899 out:
4900         return current_link_up;
4901 }
4902
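     /* Fiber link bring-up with autoneg run in software through
      * fiber_autoneg(), or a forced 1000FD link when autoneg is
      * disabled.  Returns nonzero when the link is up.
      */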
4903 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4904 {
4905         int current_link_up = 0;
4906
4907         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4908                 goto out;
4909
4910         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4911                 u32 txflags, rxflags;
4912                 int i;
4913
4914                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4915                         u32 local_adv = 0, remote_adv = 0;
4916
4917                         if (txflags & ANEG_CFG_PS1)
4918                                 local_adv |= ADVERTISE_1000XPAUSE;
4919                         if (txflags & ANEG_CFG_PS2)
4920                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4921
4922                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4923                                 remote_adv |= LPA_1000XPAUSE;
4924                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4925                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4926
4927                         tp->link_config.rmt_adv =
4928                                            mii_adv_to_ethtool_adv_x(remote_adv);
4929
4930                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4931
4932                         current_link_up = 1;
4933                 }
4934                 for (i = 0; i < 30; i++) {
4935                         udelay(20);
4936                         tw32_f(MAC_STATUS,
4937                                (MAC_STATUS_SYNC_CHANGED |
4938                                 MAC_STATUS_CFG_CHANGED));
4939                         udelay(40);
4940                         if ((tr32(MAC_STATUS) &
4941                              (MAC_STATUS_SYNC_CHANGED |
4942                               MAC_STATUS_CFG_CHANGED)) == 0)
4943                                 break;
4944                 }
4945
4946                 mac_status = tr32(MAC_STATUS);
4947                 if (current_link_up == 0 &&
4948                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4949                     !(mac_status & MAC_STATUS_RCVD_CFG))
4950                         current_link_up = 1;
4951         } else {
4952                 tg3_setup_flow_control(tp, 0, 0);
4953
4954                 /* Forcing 1000FD link up. */
4955                 current_link_up = 1;
4956
4957                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4958                 udelay(40);
4959
4960                 tw32_f(MAC_MODE, tp->mac_mode);
4961                 udelay(40);
4962         }
4963
4964 out:
4965         return current_link_up;
4966 }
4967
4968 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4969 {
4970         u32 orig_pause_cfg;
4971         u16 orig_active_speed;
4972         u8 orig_active_duplex;
4973         u32 mac_status;
4974         int current_link_up;
4975         int i;
4976
4977         orig_pause_cfg = tp->link_config.active_flowctrl;
4978         orig_active_speed = tp->link_config.active_speed;
4979         orig_active_duplex = tp->link_config.active_duplex;
4980
4981         if (!tg3_flag(tp, HW_AUTONEG) &&
4982             netif_carrier_ok(tp->dev) &&
4983             tg3_flag(tp, INIT_COMPLETE)) {
4984                 mac_status = tr32(MAC_STATUS);
4985                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4986                                MAC_STATUS_SIGNAL_DET |
4987                                MAC_STATUS_CFG_CHANGED |
4988                                MAC_STATUS_RCVD_CFG);
4989                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4990                                    MAC_STATUS_SIGNAL_DET)) {
4991                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4992                                             MAC_STATUS_CFG_CHANGED));
4993                         return 0;
4994                 }
4995         }
4996
4997         tw32_f(MAC_TX_AUTO_NEG, 0);
4998
4999         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5000         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5001         tw32_f(MAC_MODE, tp->mac_mode);
5002         udelay(40);
5003
5004         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5005                 tg3_init_bcm8002(tp);
5006
5007         /* Enable link change events even when polling the serdes.  */
5008         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5009         udelay(40);
5010
5011         current_link_up = 0;
5012         tp->link_config.rmt_adv = 0;
5013         mac_status = tr32(MAC_STATUS);
5014
5015         if (tg3_flag(tp, HW_AUTONEG))
5016                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5017         else
5018                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5019
5020         tp->napi[0].hw_status->status =
5021                 (SD_STATUS_UPDATED |
5022                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5023
5024         for (i = 0; i < 100; i++) {
5025                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5026                                     MAC_STATUS_CFG_CHANGED));
5027                 udelay(5);
5028                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5029                                          MAC_STATUS_CFG_CHANGED |
5030                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5031                         break;
5032         }
5033
5034         mac_status = tr32(MAC_STATUS);
5035         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5036                 current_link_up = 0;
5037                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5038                     tp->serdes_counter == 0) {
5039                         tw32_f(MAC_MODE, (tp->mac_mode |
5040                                           MAC_MODE_SEND_CONFIGS));
5041                         udelay(1);
5042                         tw32_f(MAC_MODE, tp->mac_mode);
5043                 }
5044         }
5045
5046         if (current_link_up == 1) {
5047                 tp->link_config.active_speed = SPEED_1000;
5048                 tp->link_config.active_duplex = DUPLEX_FULL;
5049                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5050                                     LED_CTRL_LNKLED_OVERRIDE |
5051                                     LED_CTRL_1000MBPS_ON));
5052         } else {
5053                 tp->link_config.active_speed = SPEED_UNKNOWN;
5054                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5055                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5056                                     LED_CTRL_LNKLED_OVERRIDE |
5057                                     LED_CTRL_TRAFFIC_OVERRIDE));
5058         }
5059
5060         if (current_link_up != netif_carrier_ok(tp->dev)) {
5061                 if (current_link_up)
5062                         netif_carrier_on(tp->dev);
5063                 else
5064                         netif_carrier_off(tp->dev);
5065                 tg3_link_report(tp);
5066         } else {
5067                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5068                 if (orig_pause_cfg != now_pause_cfg ||
5069                     orig_active_speed != tp->link_config.active_speed ||
5070                     orig_active_duplex != tp->link_config.active_duplex)
5071                         tg3_link_report(tp);
5072         }
5073
5074         return 0;
5075 }
5076
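     /* Link bring-up for serdes parts (e.g. 5714S) that expose
      * 1000Base-X autoneg through standard MII registers.
      */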
5077 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5078 {
5079         int current_link_up, err = 0;
5080         u32 bmsr, bmcr;
5081         u16 current_speed;
5082         u8 current_duplex;
5083         u32 local_adv, remote_adv;
5084
5085         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5086         tw32_f(MAC_MODE, tp->mac_mode);
5087         udelay(40);
5088
5089         tw32(MAC_EVENT, 0);
5090
5091         tw32_f(MAC_STATUS,
5092              (MAC_STATUS_SYNC_CHANGED |
5093               MAC_STATUS_CFG_CHANGED |
5094               MAC_STATUS_MI_COMPLETION |
5095               MAC_STATUS_LNKSTATE_CHANGED));
5096         udelay(40);
5097
5098         if (force_reset)
5099                 tg3_phy_reset(tp);
5100
5101         current_link_up = 0;
5102         current_speed = SPEED_UNKNOWN;
5103         current_duplex = DUPLEX_UNKNOWN;
5104         tp->link_config.rmt_adv = 0;
5105
5106         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5107         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5109                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5110                         bmsr |= BMSR_LSTATUS;
5111                 else
5112                         bmsr &= ~BMSR_LSTATUS;
5113         }
5114
5115         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5116
5117         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5118             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5119                 /* do nothing, just check for link up at the end */
5120         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5121                 u32 adv, newadv;
5122
5123                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5124                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5125                                  ADVERTISE_1000XPAUSE |
5126                                  ADVERTISE_1000XPSE_ASYM |
5127                                  ADVERTISE_SLCT);
5128
5129                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5130                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5131
5132                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5133                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5134                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5135                         tg3_writephy(tp, MII_BMCR, bmcr);
5136
5137                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5138                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5139                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5140
5141                         return err;
5142                 }
5143         } else {
5144                 u32 new_bmcr;
5145
5146                 bmcr &= ~BMCR_SPEED1000;
5147                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5148
5149                 if (tp->link_config.duplex == DUPLEX_FULL)
5150                         new_bmcr |= BMCR_FULLDPLX;
5151
5152                 if (new_bmcr != bmcr) {
5153                         /* BMCR_SPEED1000 is a reserved bit that needs
5154                          * to be set on write.
5155                          */
5156                         new_bmcr |= BMCR_SPEED1000;
5157
5158                         /* Force a linkdown */
5159                         if (netif_carrier_ok(tp->dev)) {
5160                                 u32 adv;
5161
5162                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5163                                 adv &= ~(ADVERTISE_1000XFULL |
5164                                          ADVERTISE_1000XHALF |
5165                                          ADVERTISE_SLCT);
5166                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5167                                 tg3_writephy(tp, MII_BMCR, bmcr |
5168                                                            BMCR_ANRESTART |
5169                                                            BMCR_ANENABLE);
5170                                 udelay(10);
5171                                 netif_carrier_off(tp->dev);
5172                         }
5173                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5174                         bmcr = new_bmcr;
5175                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5176                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5177                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5178                             ASIC_REV_5714) {
5179                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5180                                         bmsr |= BMSR_LSTATUS;
5181                                 else
5182                                         bmsr &= ~BMSR_LSTATUS;
5183                         }
5184                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5185                 }
5186         }
5187
5188         if (bmsr & BMSR_LSTATUS) {
5189                 current_speed = SPEED_1000;
5190                 current_link_up = 1;
5191                 if (bmcr & BMCR_FULLDPLX)
5192                         current_duplex = DUPLEX_FULL;
5193                 else
5194                         current_duplex = DUPLEX_HALF;
5195
5196                 local_adv = 0;
5197                 remote_adv = 0;
5198
5199                 if (bmcr & BMCR_ANENABLE) {
5200                         u32 common;
5201
5202                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5203                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5204                         common = local_adv & remote_adv;
5205                         if (common & (ADVERTISE_1000XHALF |
5206                                       ADVERTISE_1000XFULL)) {
5207                                 if (common & ADVERTISE_1000XFULL)
5208                                         current_duplex = DUPLEX_FULL;
5209                                 else
5210                                         current_duplex = DUPLEX_HALF;
5211
5212                                 tp->link_config.rmt_adv =
5213                                            mii_adv_to_ethtool_adv_x(remote_adv);
5214                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5215                                 /* Link is up via parallel detect */
5216                         } else {
5217                                 current_link_up = 0;
5218                         }
5219                 }
5220         }
5221
5222         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5223                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5224
5225         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5226         if (tp->link_config.active_duplex == DUPLEX_HALF)
5227                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5228
5229         tw32_f(MAC_MODE, tp->mac_mode);
5230         udelay(40);
5231
5232         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5233
5234         tp->link_config.active_speed = current_speed;
5235         tp->link_config.active_duplex = current_duplex;
5236
5237         if (current_link_up != netif_carrier_ok(tp->dev)) {
5238                 if (current_link_up)
5239                         netif_carrier_on(tp->dev);
5240                 else {
5241                         netif_carrier_off(tp->dev);
5242                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5243                 }
5244                 tg3_link_report(tp);
5245         }
5246         return err;
5247 }
5248
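     /* Called periodically while autoneg is pending: once the autoneg
      * timer expires, force a 1000FD link by parallel detection if we
      * have signal but no config code words; if code words reappear
      * later, hand the link back to autoneg.
      */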
5249 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5250 {
5251         if (tp->serdes_counter) {
5252                 /* Give autoneg time to complete. */
5253                 tp->serdes_counter--;
5254                 return;
5255         }
5256
5257         if (!netif_carrier_ok(tp->dev) &&
5258             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5259                 u32 bmcr;
5260
5261                 tg3_readphy(tp, MII_BMCR, &bmcr);
5262                 if (bmcr & BMCR_ANENABLE) {
5263                         u32 phy1, phy2;
5264
5265                         /* Select shadow register 0x1f */
5266                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5267                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5268
5269                         /* Select expansion interrupt status register */
5270                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5271                                          MII_TG3_DSP_EXP1_INT_STAT);
5272                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5273                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5274
5275                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5276                                 /* We have signal detect and not receiving
5277                                  * config code words, link is up by parallel
5278                                  * detection.
5279                                  */
5280
5281                                 bmcr &= ~BMCR_ANENABLE;
5282                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5283                                 tg3_writephy(tp, MII_BMCR, bmcr);
5284                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5285                         }
5286                 }
5287         } else if (netif_carrier_ok(tp->dev) &&
5288                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5289                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5290                 u32 phy2;
5291
5292                 /* Select expansion interrupt status register */
5293                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5294                                  MII_TG3_DSP_EXP1_INT_STAT);
5295                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5296                 if (phy2 & 0x20) {
5297                         u32 bmcr;
5298
5299                         /* Config code words received, turn on autoneg. */
5300                         tg3_readphy(tp, MII_BMCR, &bmcr);
5301                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5302
5303                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5304
5305                 }
5306         }
5307 }
5308
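     /* Top-level link (re)configuration: dispatch to the fiber,
      * MII-serdes or copper handler, then retune the clock prescaler,
      * tx slot time, statistics coalescing and ASPM threshold to match
      * the resulting link state.
      */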
5309 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5310 {
5311         u32 val;
5312         int err;
5313
5314         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5315                 err = tg3_setup_fiber_phy(tp, force_reset);
5316         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5317                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5318         else
5319                 err = tg3_setup_copper_phy(tp, force_reset);
5320
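         /* On 5784 A-step parts, rescale the GRC timer prescaler to
          * track the MAC core clock currently reported by the CPMU.
          */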
5321         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5322                 u32 scale;
5323
5324                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5325                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5326                         scale = 65;
5327                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5328                         scale = 6;
5329                 else
5330                         scale = 12;
5331
5332                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5333                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5334                 tw32(GRC_MISC_CFG, val);
5335         }
5336
5337         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5338               (6 << TX_LENGTHS_IPG_SHIFT);
5339         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5340                 val |= tr32(MAC_TX_LENGTHS) &
5341                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5342                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5343
5344         if (tp->link_config.active_speed == SPEED_1000 &&
5345             tp->link_config.active_duplex == DUPLEX_HALF)
5346                 tw32(MAC_TX_LENGTHS, val |
5347                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5348         else
5349                 tw32(MAC_TX_LENGTHS, val |
5350                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5351
5352         if (!tg3_flag(tp, 5705_PLUS)) {
5353                 if (netif_carrier_ok(tp->dev)) {
5354                         tw32(HOSTCC_STAT_COAL_TICKS,
5355                              tp->coal.stats_block_coalesce_usecs);
5356                 } else {
5357                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5358                 }
5359         }
5360
5361         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5362                 val = tr32(PCIE_PWR_MGMT_THRESH);
5363                 if (!netif_carrier_ok(tp->dev))
5364                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5365                               tp->pwrmgmt_thresh;
5366                 else
5367                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5368                 tw32(PCIE_PWR_MGMT_THRESH, val);
5369         }
5370
5371         return err;
5372 }
5373
5374 static inline int tg3_irq_sync(struct tg3 *tp)
5375 {
5376         return tp->irq_sync;
5377 }
5378
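     /* Read len bytes of registers starting at off into the dump
      * buffer at the same byte offset, so the buffer mirrors the
      * register address map.
      */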
5379 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5380 {
5381         int i;
5382
5383         dst = (u32 *)((u8 *)dst + off);
5384         for (i = 0; i < len; i += sizeof(u32))
5385                 *dst++ = tr32(off + i);
5386 }
5387
5388 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5389 {
5390         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5391         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5392         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5393         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5394         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5395         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5396         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5397         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5398         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5399         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5400         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5401         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5402         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5403         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5404         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5405         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5406         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5407         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5408         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5409
5410         if (tg3_flag(tp, SUPPORT_MSIX))
5411                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5412
5413         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5414         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5415         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5416         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5417         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5418         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5419         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5420         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5421
5422         if (!tg3_flag(tp, 5705_PLUS)) {
5423                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5424                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5425                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5426         }
5427
5428         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5429         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5430         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5431         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5432         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5433
5434         if (tg3_flag(tp, NVRAM))
5435                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5436 }
5437
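     /* Dump the device register block plus each vector's hardware
      * status block and NAPI state to the kernel log; all-zero
      * register rows are skipped.
      */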
5438 static void tg3_dump_state(struct tg3 *tp)
5439 {
5440         int i;
5441         u32 *regs;
5442
5443         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5444         if (!regs) {
5445                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5446                 return;
5447         }
5448
5449         if (tg3_flag(tp, PCI_EXPRESS)) {
5450                 /* Read up to but not including private PCI registers */
5451                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5452                         regs[i / sizeof(u32)] = tr32(i);
5453         } else
5454                 tg3_dump_legacy_regs(tp, regs);
5455
5456         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5457                 if (!regs[i + 0] && !regs[i + 1] &&
5458                     !regs[i + 2] && !regs[i + 3])
5459                         continue;
5460
5461                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5462                            i * 4,
5463                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5464         }
5465
5466         kfree(regs);
5467
5468         for (i = 0; i < tp->irq_cnt; i++) {
5469                 struct tg3_napi *tnapi = &tp->napi[i];
5470
5471                 /* SW status block */
5472                 netdev_err(tp->dev,
5473                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5474                            i,
5475                            tnapi->hw_status->status,
5476                            tnapi->hw_status->status_tag,
5477                            tnapi->hw_status->rx_jumbo_consumer,
5478                            tnapi->hw_status->rx_consumer,
5479                            tnapi->hw_status->rx_mini_consumer,
5480                            tnapi->hw_status->idx[0].rx_producer,
5481                            tnapi->hw_status->idx[0].tx_consumer);
5482
5483                 netdev_err(tp->dev,
5484                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5485                            i,
5486                            tnapi->last_tag, tnapi->last_irq_tag,
5487                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5488                            tnapi->rx_rcb_ptr,
5489                            tnapi->prodring.rx_std_prod_idx,
5490                            tnapi->prodring.rx_std_cons_idx,
5491                            tnapi->prodring.rx_jmb_prod_idx,
5492                            tnapi->prodring.rx_jmb_cons_idx);
5493         }
5494 }
5495
5496 /* This is called whenever we suspect that the system chipset is re-
5497  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5498  * is bogus tx completions. We try to recover by setting the
5499  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5500  * in the workqueue.
5501  */
5502 static void tg3_tx_recover(struct tg3 *tp)
5503 {
5504         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5505                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5506
5507         netdev_warn(tp->dev,
5508                     "The system may be re-ordering memory-mapped I/O "
5509                     "cycles to the network device, attempting to recover. "
5510                     "Please report the problem to the driver maintainer "
5511                     "and include system chipset information.\n");
5512
5513         spin_lock(&tp->lock);
5514         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5515         spin_unlock(&tp->lock);
5516 }
5517
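     /* Free tx descriptors remaining out of the configured tx_pending
      * budget; the mask handles producer/consumer wraparound on the
      * power-of-two ring.
      */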
5518 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5519 {
5520         /* Tell compiler to fetch tx indices from memory. */
5521         barrier();
5522         return tnapi->tx_pending -
5523                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5524 }
5525
5526 /* Tigon3 never reports partial packet sends.  So we do not
5527  * need special logic to handle SKBs that have not had all
5528  * of their frags sent yet, like SunGEM does.
5529  */
5530 static void tg3_tx(struct tg3_napi *tnapi)
5531 {
5532         struct tg3 *tp = tnapi->tp;
5533         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5534         u32 sw_idx = tnapi->tx_cons;
5535         struct netdev_queue *txq;
5536         int index = tnapi - tp->napi;
5537         unsigned int pkts_compl = 0, bytes_compl = 0;
5538
5539         if (tg3_flag(tp, ENABLE_TSS))
5540                 index--;
5541
5542         txq = netdev_get_tx_queue(tp->dev, index);
5543
5544         while (sw_idx != hw_idx) {
5545                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5546                 struct sk_buff *skb = ri->skb;
5547                 int i, tx_bug = 0;
5548
5549                 if (unlikely(skb == NULL)) {
5550                         tg3_tx_recover(tp);
5551                         return;
5552                 }
5553
5554                 pci_unmap_single(tp->pdev,
5555                                  dma_unmap_addr(ri, mapping),
5556                                  skb_headlen(skb),
5557                                  PCI_DMA_TODEVICE);
5558
5559                 ri->skb = NULL;
5560
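                     /* A mapping that had to be split across several
                      * descriptors leaves the extra ring entries marked
                      * "fragmented"; skip past all of them.
                      */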
5561                 while (ri->fragmented) {
5562                         ri->fragmented = false;
5563                         sw_idx = NEXT_TX(sw_idx);
5564                         ri = &tnapi->tx_buffers[sw_idx];
5565                 }
5566
5567                 sw_idx = NEXT_TX(sw_idx);
5568
5569                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5570                         ri = &tnapi->tx_buffers[sw_idx];
5571                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5572                                 tx_bug = 1;
5573
5574                         pci_unmap_page(tp->pdev,
5575                                        dma_unmap_addr(ri, mapping),
5576                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5577                                        PCI_DMA_TODEVICE);
5578
5579                         while (ri->fragmented) {
5580                                 ri->fragmented = false;
5581                                 sw_idx = NEXT_TX(sw_idx);
5582                                 ri = &tnapi->tx_buffers[sw_idx];
5583                         }
5584
5585                         sw_idx = NEXT_TX(sw_idx);
5586                 }
5587
5588                 pkts_compl++;
5589                 bytes_compl += skb->len;
5590
5591                 dev_kfree_skb(skb);
5592
5593                 if (unlikely(tx_bug)) {
5594                         tg3_tx_recover(tp);
5595                         return;
5596                 }
5597         }
5598
5599         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5600
5601         tnapi->tx_cons = sw_idx;
5602
5603         /* Need to make the tx_cons update visible to tg3_start_xmit()
5604          * before checking for netif_queue_stopped().  Without the
5605          * memory barrier, there is a small possibility that tg3_start_xmit()
5606          * will miss it and cause the queue to be stopped forever.
5607          */
5608         smp_mb();
5609
5610         if (unlikely(netif_tx_queue_stopped(txq) &&
5611                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5612                 __netif_tx_lock(txq, smp_processor_id());
5613                 if (netif_tx_queue_stopped(txq) &&
5614                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5615                         netif_tx_wake_queue(txq);
5616                 __netif_tx_unlock(txq);
5617         }
5618 }
5619
5620 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5621 {
5622         if (!ri->data)
5623                 return;
5624
5625         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5626                          map_sz, PCI_DMA_FROMDEVICE);
5627         kfree(ri->data);
5628         ri->data = NULL;
5629 }
5630
5631 /* Returns the size of the rx data buffer allocated, or < 0 on error.
5632  *
5633  * We only need to fill in the address because the other members
5634  * of the RX descriptor are invariant, see tg3_init_rings.
5635  *
5636  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5637  * posting buffers we only dirty the first cache line of the RX
5638  * descriptor (containing the address).  Whereas for the RX status
5639  * buffers the cpu only reads the last cacheline of the RX descriptor
5640  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5641  */
5642 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5643                             u32 opaque_key, u32 dest_idx_unmasked)
5644 {
5645         struct tg3_rx_buffer_desc *desc;
5646         struct ring_info *map;
5647         u8 *data;
5648         dma_addr_t mapping;
5649         int skb_size, data_size, dest_idx;
5650
5651         switch (opaque_key) {
5652         case RXD_OPAQUE_RING_STD:
5653                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5654                 desc = &tpr->rx_std[dest_idx];
5655                 map = &tpr->rx_std_buffers[dest_idx];
5656                 data_size = tp->rx_pkt_map_sz;
5657                 break;
5658
5659         case RXD_OPAQUE_RING_JUMBO:
5660                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5661                 desc = &tpr->rx_jmb[dest_idx].std;
5662                 map = &tpr->rx_jmb_buffers[dest_idx];
5663                 data_size = TG3_RX_JMB_MAP_SZ;
5664                 break;
5665
5666         default:
5667                 return -EINVAL;
5668         }
5669
5670         /* Do not overwrite any of the map or ring information
5671          * until we are sure we can commit to a new buffer.
5672          *
5673          * Callers depend upon this behavior and assume that
5674          * we leave everything unchanged if we fail.
5675          */
5676         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5677                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5678         data = kmalloc(skb_size, GFP_ATOMIC);
5679         if (!data)
5680                 return -ENOMEM;
5681
5682         mapping = pci_map_single(tp->pdev,
5683                                  data + TG3_RX_OFFSET(tp),
5684                                  data_size,
5685                                  PCI_DMA_FROMDEVICE);
5686         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5687                 kfree(data);
5688                 return -EIO;
5689         }
5690
5691         map->data = data;
5692         dma_unmap_addr_set(map, mapping, mapping);
5693
5694         desc->addr_hi = ((u64)mapping >> 32);
5695         desc->addr_lo = ((u64)mapping & 0xffffffff);
5696
5697         return data_size;
5698 }
5699
5700 /* We only need to copy the address over because the other
5701  * members of the RX descriptor are invariant.  See notes above
5702  * tg3_alloc_rx_data for full details.
5703  */
5704 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5705                            struct tg3_rx_prodring_set *dpr,
5706                            u32 opaque_key, int src_idx,
5707                            u32 dest_idx_unmasked)
5708 {
5709         struct tg3 *tp = tnapi->tp;
5710         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5711         struct ring_info *src_map, *dest_map;
5712         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5713         int dest_idx;
5714
5715         switch (opaque_key) {
5716         case RXD_OPAQUE_RING_STD:
5717                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5718                 dest_desc = &dpr->rx_std[dest_idx];
5719                 dest_map = &dpr->rx_std_buffers[dest_idx];
5720                 src_desc = &spr->rx_std[src_idx];
5721                 src_map = &spr->rx_std_buffers[src_idx];
5722                 break;
5723
5724         case RXD_OPAQUE_RING_JUMBO:
5725                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5726                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5727                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5728                 src_desc = &spr->rx_jmb[src_idx].std;
5729                 src_map = &spr->rx_jmb_buffers[src_idx];
5730                 break;
5731
5732         default:
5733                 return;
5734         }
5735
5736         dest_map->data = src_map->data;
5737         dma_unmap_addr_set(dest_map, mapping,
5738                            dma_unmap_addr(src_map, mapping));
5739         dest_desc->addr_hi = src_desc->addr_hi;
5740         dest_desc->addr_lo = src_desc->addr_lo;
5741
5742         /* Ensure that the update to the data pointer happens after the
5743          * physical addresses have been transferred to the new BD location.
5744          */
5745         smp_wmb();
5746
5747         src_map->data = NULL;
5748 }
5749
5750 /* The RX ring scheme is composed of multiple rings which post fresh
5751  * buffers to the chip, and one special ring the chip uses to report
5752  * status back to the host.
5753  *
5754  * The special ring reports the status of received packets to the
5755  * host.  The chip does not write into the original descriptor the
5756  * RX buffer was obtained from.  The chip simply takes the original
5757  * descriptor as provided by the host, updates the status and length
5758  * field, then writes this into the next status ring entry.
5759  *
5760  * Each ring the host uses to post buffers to the chip is described
5761  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5762  * it is first placed into the on-chip ram.  When the packet's length
5763  * is known, it walks down the TG3_BDINFO entries to select the ring.
5764  * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
5765  * whose MAXLEN covers the new packet's length is chosen.
5766  *
5767  * The "separate ring for rx status" scheme may sound odd, but it makes
5768  * sense from a cache coherency perspective.  If only the host writes
5769  * to the buffer post rings, and only the chip writes to the rx status
5770  * rings, then cache lines never move beyond shared-modified state.
5771  * If both the host and chip were to write into the same ring, cache line
5772  * eviction could occur since both entities want it in an exclusive state.
5773  */
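/* E.g. (illustrative values): with MAXLEN 1536 programmed for the
 * standard ring and MAXLEN 9046 for the jumbo ring, a 1514-byte frame
 * is matched by the standard ring's TG3_BDINFO while a 9014-byte frame
 * falls through to the jumbo ring's.
 */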
5774 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5775 {
5776         struct tg3 *tp = tnapi->tp;
5777         u32 work_mask, rx_std_posted = 0;
5778         u32 std_prod_idx, jmb_prod_idx;
5779         u32 sw_idx = tnapi->rx_rcb_ptr;
5780         u16 hw_idx;
5781         int received;
5782         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5783
5784         hw_idx = *(tnapi->rx_rcb_prod_idx);
5785         /*
5786          * We need to order the read of hw_idx and the read of
5787          * the opaque cookie.
5788          */
5789         rmb();
5790         work_mask = 0;
5791         received = 0;
5792         std_prod_idx = tpr->rx_std_prod_idx;
5793         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5794         while (sw_idx != hw_idx && budget > 0) {
5795                 struct ring_info *ri;
5796                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5797                 unsigned int len;
5798                 struct sk_buff *skb;
5799                 dma_addr_t dma_addr;
5800                 u32 opaque_key, desc_idx, *post_ptr;
5801                 u8 *data;
5802
5803                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5804                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5805                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5806                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5807                         dma_addr = dma_unmap_addr(ri, mapping);
5808                         data = ri->data;
5809                         post_ptr = &std_prod_idx;
5810                         rx_std_posted++;
5811                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5812                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5813                         dma_addr = dma_unmap_addr(ri, mapping);
5814                         data = ri->data;
5815                         post_ptr = &jmb_prod_idx;
5816                 } else
5817                         goto next_pkt_nopost;
5818
5819                 work_mask |= opaque_key;
5820
5821                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5822                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5823                 drop_it:
5824                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5825                                        desc_idx, *post_ptr);
5826                 drop_it_no_recycle:
5827                         /* Other statistics are kept track of by the card. */
5828                         tp->rx_dropped++;
5829                         goto next_pkt;
5830                 }
5831
5832                 prefetch(data + TG3_RX_OFFSET(tp));
5833                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5834                       ETH_FCS_LEN;
5835
5836                 if (len > TG3_RX_COPY_THRESH(tp)) {
5837                         int skb_size;
5838
5839                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5840                                                     *post_ptr);
5841                         if (skb_size < 0)
5842                                 goto drop_it;
5843
5844                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5845                                          PCI_DMA_FROMDEVICE);
5846
5847                         skb = build_skb(data, 0);
5848                         if (!skb) {
5849                                 kfree(data);
5850                                 goto drop_it_no_recycle;
5851                         }
5852                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5853                         /* Ensure that the update to the data happens
5854                          * after the usage of the old DMA mapping.
5855                          */
5856                         smp_wmb();
5857
5858                         ri->data = NULL;
5859
5860                 } else {
5861                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5862                                        desc_idx, *post_ptr);
5863
5864                         skb = netdev_alloc_skb(tp->dev,
5865                                                len + TG3_RAW_IP_ALIGN);
5866                         if (skb == NULL)
5867                                 goto drop_it_no_recycle;
5868
5869                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5870                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5871                         memcpy(skb->data,
5872                                data + TG3_RX_OFFSET(tp),
5873                                len);
5874                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5875                 }
5876
5877                 skb_put(skb, len);
5878                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5879                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5880                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5881                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5882                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5883                 else
5884                         skb_checksum_none_assert(skb);
5885
5886                 skb->protocol = eth_type_trans(skb, tp->dev);
5887
5888                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5889                     skb->protocol != htons(ETH_P_8021Q)) {
5890                         dev_kfree_skb(skb);
5891                         goto drop_it_no_recycle;
5892                 }
5893
5894                 if (desc->type_flags & RXD_FLAG_VLAN &&
5895                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5896                         __vlan_hwaccel_put_tag(skb,
5897                                                desc->err_vlan & RXD_VLAN_MASK);
5898
5899                 napi_gro_receive(&tnapi->napi, skb);
5900
5901                 received++;
5902                 budget--;
5903
5904 next_pkt:
5905                 (*post_ptr)++;
5906
5907                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5908                         tpr->rx_std_prod_idx = std_prod_idx &
5909                                                tp->rx_std_ring_mask;
5910                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5911                                      tpr->rx_std_prod_idx);
5912                         work_mask &= ~RXD_OPAQUE_RING_STD;
5913                         rx_std_posted = 0;
5914                 }
5915 next_pkt_nopost:
5916                 sw_idx++;
5917                 sw_idx &= tp->rx_ret_ring_mask;
5918
5919                 /* Refresh hw_idx to see if there is new work */
5920                 if (sw_idx == hw_idx) {
5921                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5922                         rmb();
5923                 }
5924         }
5925
5926         /* ACK the status ring. */
5927         tnapi->rx_rcb_ptr = sw_idx;
5928         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5929
5930         /* Refill RX ring(s). */
5931         if (!tg3_flag(tp, ENABLE_RSS)) {
5932                 /* Sync BD data before updating mailbox */
5933                 wmb();
5934
5935                 if (work_mask & RXD_OPAQUE_RING_STD) {
5936                         tpr->rx_std_prod_idx = std_prod_idx &
5937                                                tp->rx_std_ring_mask;
5938                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5939                                      tpr->rx_std_prod_idx);
5940                 }
5941                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5942                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5943                                                tp->rx_jmb_ring_mask;
5944                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5945                                      tpr->rx_jmb_prod_idx);
5946                 }
5947                 mmiowb();
5948         } else if (work_mask) {
5949                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5950                  * updated before the producer indices can be updated.
5951                  */
5952                 smp_wmb();
5953
5954                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5955                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5956
5957                 if (tnapi != &tp->napi[1]) {
5958                         tp->rx_refill = true;
5959                         napi_schedule(&tp->napi[1].napi);
5960                 }
5961         }
5962
5963         return received;
5964 }
5965
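/* Check the status block for a link-change event and, if one is
 * pending, either ack the MAC status bits (phylib case) or rerun the
 * link setup, in both cases under tp->lock.
 */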
5966 static void tg3_poll_link(struct tg3 *tp)
5967 {
5968         /* handle link change and other phy events */
5969         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5970                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5971
5972                 if (sblk->status & SD_STATUS_LINK_CHG) {
5973                         sblk->status = SD_STATUS_UPDATED |
5974                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5975                         spin_lock(&tp->lock);
5976                         if (tg3_flag(tp, USE_PHYLIB)) {
5977                                 tw32_f(MAC_STATUS,
5978                                      (MAC_STATUS_SYNC_CHANGED |
5979                                       MAC_STATUS_CFG_CHANGED |
5980                                       MAC_STATUS_MI_COMPLETION |
5981                                       MAC_STATUS_LNKSTATE_CHANGED));
5982                                 udelay(40);
5983                         } else
5984                                 tg3_setup_phy(tp, 0);
5985                         spin_unlock(&tp->lock);
5986                 }
5987         }
5988 }
5989
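/* With RSS, each vector refills buffers into its own producer ring
 * (spr); this routine drains those per-vector rings into the single
 * chip-visible ring (dpr) owned by napi[0].  Returns -ENOSPC if a
 * destination slot is still occupied, i.e. the chip has not yet
 * consumed it.
 */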
5990 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5991                                 struct tg3_rx_prodring_set *dpr,
5992                                 struct tg3_rx_prodring_set *spr)
5993 {
5994         u32 si, di, cpycnt, src_prod_idx;
5995         int i, err = 0;
5996
5997         while (1) {
5998                 src_prod_idx = spr->rx_std_prod_idx;
5999
6000                 /* Make sure updates to the rx_std_buffers[] entries and the
6001                  * standard producer index are seen in the correct order.
6002                  */
6003                 smp_rmb();
6004
6005                 if (spr->rx_std_cons_idx == src_prod_idx)
6006                         break;
6007
6008                 if (spr->rx_std_cons_idx < src_prod_idx)
6009                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6010                 else
6011                         cpycnt = tp->rx_std_ring_mask + 1 -
6012                                  spr->rx_std_cons_idx;
6013
6014                 cpycnt = min(cpycnt,
6015                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6016
6017                 si = spr->rx_std_cons_idx;
6018                 di = dpr->rx_std_prod_idx;
6019
6020                 for (i = di; i < di + cpycnt; i++) {
6021                         if (dpr->rx_std_buffers[i].data) {
6022                                 cpycnt = i - di;
6023                                 err = -ENOSPC;
6024                                 break;
6025                         }
6026                 }
6027
6028                 if (!cpycnt)
6029                         break;
6030
6031                 /* Ensure that updates to the rx_std_buffers ring and the
6032                  * shadowed hardware producer ring from tg3_recycle_rx() are
6033                  * ordered correctly WRT the buffer check above.
6034                  */
6035                 smp_rmb();
6036
6037                 memcpy(&dpr->rx_std_buffers[di],
6038                        &spr->rx_std_buffers[si],
6039                        cpycnt * sizeof(struct ring_info));
6040
6041                 for (i = 0; i < cpycnt; i++, di++, si++) {
6042                         struct tg3_rx_buffer_desc *sbd, *dbd;
6043                         sbd = &spr->rx_std[si];
6044                         dbd = &dpr->rx_std[di];
6045                         dbd->addr_hi = sbd->addr_hi;
6046                         dbd->addr_lo = sbd->addr_lo;
6047                 }
6048
6049                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6050                                        tp->rx_std_ring_mask;
6051                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6052                                        tp->rx_std_ring_mask;
6053         }
6054
6055         while (1) {
6056                 src_prod_idx = spr->rx_jmb_prod_idx;
6057
6058                 /* Make sure updates to the rx_jmb_buffers[] entries and
6059                  * the jumbo producer index are seen in the correct order.
6060                  */
6061                 smp_rmb();
6062
6063                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6064                         break;
6065
6066                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6067                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6068                 else
6069                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6070                                  spr->rx_jmb_cons_idx;
6071
6072                 cpycnt = min(cpycnt,
6073                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6074
6075                 si = spr->rx_jmb_cons_idx;
6076                 di = dpr->rx_jmb_prod_idx;
6077
6078                 for (i = di; i < di + cpycnt; i++) {
6079                         if (dpr->rx_jmb_buffers[i].data) {
6080                                 cpycnt = i - di;
6081                                 err = -ENOSPC;
6082                                 break;
6083                         }
6084                 }
6085
6086                 if (!cpycnt)
6087                         break;
6088
6089                 /* Ensure that updates to the rx_jmb_buffers ring and the
6090                  * shadowed hardware producer ring from tg3_recycle_rx() are
6091                  * ordered correctly WRT the buffer check above.
6092                  */
6093                 smp_rmb();
6094
6095                 memcpy(&dpr->rx_jmb_buffers[di],
6096                        &spr->rx_jmb_buffers[si],
6097                        cpycnt * sizeof(struct ring_info));
6098
6099                 for (i = 0; i < cpycnt; i++, di++, si++) {
6100                         struct tg3_rx_buffer_desc *sbd, *dbd;
6101                         sbd = &spr->rx_jmb[si].std;
6102                         dbd = &dpr->rx_jmb[di].std;
6103                         dbd->addr_hi = sbd->addr_hi;
6104                         dbd->addr_lo = sbd->addr_lo;
6105                 }
6106
6107                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6108                                        tp->rx_jmb_ring_mask;
6109                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6110                                        tp->rx_jmb_ring_mask;
6111         }
6112
6113         return err;
6114 }
6115
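/* Core NAPI work loop body: reap TX completions first, then receive up
 * to (budget - work_done) packets.  With RSS, vector 1 also acts as the
 * refill funnel, transferring fresh buffers from every other vector's
 * producer ring to the hardware rings via tg3_rx_prodring_xfer() above.
 */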
6116 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6117 {
6118         struct tg3 *tp = tnapi->tp;
6119
6120         /* run TX completion thread */
6121         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6122                 tg3_tx(tnapi);
6123                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6124                         return work_done;
6125         }
6126
6127         /* run RX thread, within the bounds set by NAPI.
6128          * All RX "locking" is done by ensuring outside
6129          * code synchronizes with tg3->napi.poll()
6130          */
6131         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6132                 work_done += tg3_rx(tnapi, budget - work_done);
6133
6134         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6135                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6136                 int i, err = 0;
6137                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6138                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6139
6140                 tp->rx_refill = false;
6141                 for (i = 1; i < tp->irq_cnt; i++)
6142                         err |= tg3_rx_prodring_xfer(tp, dpr,
6143                                                     &tp->napi[i].prodring);
6144
6145                 wmb();
6146
6147                 if (std_prod_idx != dpr->rx_std_prod_idx)
6148                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6149                                      dpr->rx_std_prod_idx);
6150
6151                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6152                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6153                                      dpr->rx_jmb_prod_idx);
6154
6155                 mmiowb();
6156
6157                 if (err)
6158                         tw32_f(HOSTCC_MODE, tp->coal_now);
6159         }
6160
6161         return work_done;
6162 }
6163
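/* RESET_TASK_PENDING is test-and-set so the reset task is scheduled at
 * most once, no matter how many paths (tx timeout, error processing,
 * tx recovery) request it concurrently.
 */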
6164 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6165 {
6166         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6167                 schedule_work(&tp->reset_task);
6168 }
6169
6170 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6171 {
6172         cancel_work_sync(&tp->reset_task);
6173         tg3_flag_clear(tp, RESET_TASK_PENDING);
6174         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6175 }
6176
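/* NAPI poll callback for the per-vector MSI-X interrupts.  Unlike
 * tg3_poll() below, this never has to handle link events or untagged
 * status blocks.
 */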
6177 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6178 {
6179         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6180         struct tg3 *tp = tnapi->tp;
6181         int work_done = 0;
6182         struct tg3_hw_status *sblk = tnapi->hw_status;
6183
6184         while (1) {
6185                 work_done = tg3_poll_work(tnapi, work_done, budget);
6186
6187                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6188                         goto tx_recovery;
6189
6190                 if (unlikely(work_done >= budget))
6191                         break;
6192
6193                 /* tnapi->last_tag is used in the mailbox write below
6194                  * to tell the hw how much work has been processed,
6195                  * so we must read it before checking for more work.
6196                  */
6197                 tnapi->last_tag = sblk->status_tag;
6198                 tnapi->last_irq_tag = tnapi->last_tag;
6199                 rmb();
6200
6201                 /* check for RX/TX work to do */
6202                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6203                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6204
6205                         /* This test here is not race free, but will reduce
6206                          * the number of interrupts by looping again.
6207                          */
6208                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6209                                 continue;
6210
6211                         napi_complete(napi);
6212                         /* Reenable interrupts. */
6213                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6214
6215                         /* This test here is synchronized by napi_schedule()
6216                          * and napi_complete() to close the race condition.
6217                          */
6218                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6219                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6220                                                   HOSTCC_MODE_ENABLE |
6221                                                   tnapi->coal_now);
6222                         }
6223                         mmiowb();
6224                         break;
6225                 }
6226         }
6227
6228         return work_done;
6229
6230 tx_recovery:
6231         /* work_done is guaranteed to be less than budget. */
6232         napi_complete(napi);
6233         tg3_reset_task_schedule(tp);
6234         return work_done;
6235 }
6236
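/* Inspect the flow attention, MSI status and DMA status registers for
 * fatal conditions and, if one is found, dump state and schedule a chip
 * reset.  ERROR_PROCESSED keeps this from running again until the reset
 * task has done its work.
 */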
6237 static void tg3_process_error(struct tg3 *tp)
6238 {
6239         u32 val;
6240         bool real_error = false;
6241
6242         if (tg3_flag(tp, ERROR_PROCESSED))
6243                 return;
6244
6245         /* Check Flow Attention register */
6246         val = tr32(HOSTCC_FLOW_ATTN);
6247         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6248                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6249                 real_error = true;
6250         }
6251
6252         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6253                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6254                 real_error = true;
6255         }
6256
6257         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6258                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6259                 real_error = true;
6260         }
6261
6262         if (!real_error)
6263                 return;
6264
6265         tg3_dump_state(tp);
6266
6267         tg3_flag_set(tp, ERROR_PROCESSED);
6268         tg3_reset_task_schedule(tp);
6269 }
6270
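/* NAPI poll callback for INTx/MSI and for MSI-X vector 0, which also
 * owns link-change processing and error handling.
 */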
6271 static int tg3_poll(struct napi_struct *napi, int budget)
6272 {
6273         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6274         struct tg3 *tp = tnapi->tp;
6275         int work_done = 0;
6276         struct tg3_hw_status *sblk = tnapi->hw_status;
6277
6278         while (1) {
6279                 if (sblk->status & SD_STATUS_ERROR)
6280                         tg3_process_error(tp);
6281
6282                 tg3_poll_link(tp);
6283
6284                 work_done = tg3_poll_work(tnapi, work_done, budget);
6285
6286                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6287                         goto tx_recovery;
6288
6289                 if (unlikely(work_done >= budget))
6290                         break;
6291
6292                 if (tg3_flag(tp, TAGGED_STATUS)) {
6293                         /* tnapi->last_tag is used in tg3_int_reenable() below
6294                          * to tell the hw how much work has been processed,
6295                          * so we must read it before checking for more work.
6296                          */
6297                         tnapi->last_tag = sblk->status_tag;
6298                         tnapi->last_irq_tag = tnapi->last_tag;
6299                         rmb();
6300                 } else
6301                         sblk->status &= ~SD_STATUS_UPDATED;
6302
6303                 if (likely(!tg3_has_work(tnapi))) {
6304                         napi_complete(napi);
6305                         tg3_int_reenable(tnapi);
6306                         break;
6307                 }
6308         }
6309
6310         return work_done;
6311
6312 tx_recovery:
6313         /* work_done is guaranteed to be less than budget. */
6314         napi_complete(napi);
6315         tg3_reset_task_schedule(tp);
6316         return work_done;
6317 }
6318
6319 static void tg3_napi_disable(struct tg3 *tp)
6320 {
6321         int i;
6322
6323         for (i = tp->irq_cnt - 1; i >= 0; i--)
6324                 napi_disable(&tp->napi[i].napi);
6325 }
6326
6327 static void tg3_napi_enable(struct tg3 *tp)
6328 {
6329         int i;
6330
6331         for (i = 0; i < tp->irq_cnt; i++)
6332                 napi_enable(&tp->napi[i].napi);
6333 }
6334
6335 static void tg3_napi_init(struct tg3 *tp)
6336 {
6337         int i;
6338
6339         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6340         for (i = 1; i < tp->irq_cnt; i++)
6341                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6342 }
6343
6344 static void tg3_napi_fini(struct tg3 *tp)
6345 {
6346         int i;
6347
6348         for (i = 0; i < tp->irq_cnt; i++)
6349                 netif_napi_del(&tp->napi[i].napi);
6350 }
6351
6352 static inline void tg3_netif_stop(struct tg3 *tp)
6353 {
6354         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6355         tg3_napi_disable(tp);
6356         netif_tx_disable(tp->dev);
6357 }
6358
6359 static inline void tg3_netif_start(struct tg3 *tp)
6360 {
6361         /* NOTE: unconditional netif_tx_wake_all_queues is only
6362          * appropriate so long as all callers are assured to
6363          * have free tx slots (such as after tg3_init_hw)
6364          */
6365         netif_tx_wake_all_queues(tp->dev);
6366
6367         tg3_napi_enable(tp);
6368         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6369         tg3_enable_ints(tp);
6370 }
6371
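/* Mark the device as IRQ-synchronized and wait for any handler that is
 * already running to finish.  The tg3_irq_sync() checks in the handlers
 * ensure that no new NAPI work is scheduled afterwards.
 */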
6372 static void tg3_irq_quiesce(struct tg3 *tp)
6373 {
6374         int i;
6375
6376         BUG_ON(tp->irq_sync);
6377
6378         tp->irq_sync = 1;
6379         smp_mb();
6380
6381         for (i = 0; i < tp->irq_cnt; i++)
6382                 synchronize_irq(tp->napi[i].irq_vec);
6383 }
6384
6385 /* Fully shut down all tg3 driver activity elsewhere in the system.
6386  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
6387  * with as well.  Most of the time this is only necessary when
6388  * shutting down the device.
6389  */
6390 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6391 {
6392         spin_lock_bh(&tp->lock);
6393         if (irq_sync)
6394                 tg3_irq_quiesce(tp);
6395 }
6396
6397 static inline void tg3_full_unlock(struct tg3 *tp)
6398 {
6399         spin_unlock_bh(&tp->lock);
6400 }
6401
6402 /* One-shot MSI handler - the chip automatically disables the
6403  * interrupt after sending the MSI, so the driver doesn't have to.
6404  */
6405 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6406 {
6407         struct tg3_napi *tnapi = dev_id;
6408         struct tg3 *tp = tnapi->tp;
6409
6410         prefetch(tnapi->hw_status);
6411         if (tnapi->rx_rcb)
6412                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6413
6414         if (likely(!tg3_irq_sync(tp)))
6415                 napi_schedule(&tnapi->napi);
6416
6417         return IRQ_HANDLED;
6418 }
6419
6420 /* MSI ISR - No need to check for interrupt sharing and no need to
6421  * flush status block and interrupt mailbox. PCI ordering rules
6422  * guarantee that MSI will arrive after the status block.
6423  */
6424 static irqreturn_t tg3_msi(int irq, void *dev_id)
6425 {
6426         struct tg3_napi *tnapi = dev_id;
6427         struct tg3 *tp = tnapi->tp;
6428
6429         prefetch(tnapi->hw_status);
6430         if (tnapi->rx_rcb)
6431                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6432         /*
6433          * Writing any value to intr-mbox-0 clears PCI INTA# and
6434          * chip-internal interrupt pending events.
6435          * Writing non-zero to intr-mbox-0 additionally tells the
6436          * NIC to stop sending us irqs, engaging "in-intr-handler"
6437          * event coalescing.
6438          */
6439         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6440         if (likely(!tg3_irq_sync(tp)))
6441                 napi_schedule(&tnapi->napi);
6442
6443         return IRQ_RETVAL(1);
6444 }
6445
6446 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6447 {
6448         struct tg3_napi *tnapi = dev_id;
6449         struct tg3 *tp = tnapi->tp;
6450         struct tg3_hw_status *sblk = tnapi->hw_status;
6451         unsigned int handled = 1;
6452
6453         /* In INTx mode, it is possible for the interrupt to arrive at
6454          * the CPU before the status block posted prior to it has landed
6455          * in host memory.  Reading the PCI State register will confirm
6456          * whether the interrupt is ours and will flush the status block.
6457          */
6458         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6459                 if (tg3_flag(tp, CHIP_RESETTING) ||
6460                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6461                         handled = 0;
6462                         goto out;
6463                 }
6464         }
6465
6466         /*
6467          * Writing any value to intr-mbox-0 clears PCI INTA# and
6468          * chip-internal interrupt pending events.
6469          * Writing non-zero to intr-mbox-0 additionally tells the
6470          * NIC to stop sending us irqs, engaging "in-intr-handler"
6471          * event coalescing.
6472          *
6473          * Flush the mailbox to de-assert the IRQ immediately to prevent
6474          * spurious interrupts.  The flush impacts performance but
6475          * excessive spurious interrupts can be worse in some cases.
6476          */
6477         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6478         if (tg3_irq_sync(tp))
6479                 goto out;
6480         sblk->status &= ~SD_STATUS_UPDATED;
6481         if (likely(tg3_has_work(tnapi))) {
6482                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6483                 napi_schedule(&tnapi->napi);
6484         } else {
6485                 /* No work, shared interrupt perhaps?  Re-enable
6486                  * interrupts, and flush that PCI write.
6487                  */
6488                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6489                                0x00000000);
6490         }
6491 out:
6492         return IRQ_RETVAL(handled);
6493 }
6494
6495 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6496 {
6497         struct tg3_napi *tnapi = dev_id;
6498         struct tg3 *tp = tnapi->tp;
6499         struct tg3_hw_status *sblk = tnapi->hw_status;
6500         unsigned int handled = 1;
6501
6502         /* In INTx mode, it is possible for the interrupt to arrive at
6503          * the CPU before the status block posted prior to it has landed
6504          * in host memory.  Reading the PCI State register will confirm
6505          * whether the interrupt is ours and will flush the status block.
6506          */
6507         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6508                 if (tg3_flag(tp, CHIP_RESETTING) ||
6509                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6510                         handled = 0;
6511                         goto out;
6512                 }
6513         }
6514
6515         /*
6516          * Writing any value to intr-mbox-0 clears PCI INTA# and
6517          * chip-internal interrupt pending events.
6518          * Writing non-zero to intr-mbox-0 additionally tells the
6519          * NIC to stop sending us irqs, engaging "in-intr-handler"
6520          * event coalescing.
6521          *
6522          * Flush the mailbox to de-assert the IRQ immediately to prevent
6523          * spurious interrupts.  The flush impacts performance but
6524          * excessive spurious interrupts can be worse in some cases.
6525          */
6526         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6527
6528         /*
6529          * In a shared interrupt configuration, sometimes other devices'
6530          * interrupts will scream.  We record the current status tag here
6531          * so that the above check can report that the screaming interrupts
6532          * are unhandled.  Eventually they will be silenced.
6533          */
6534         tnapi->last_irq_tag = sblk->status_tag;
6535
6536         if (tg3_irq_sync(tp))
6537                 goto out;
6538
6539         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6540
6541         napi_schedule(&tnapi->napi);
6542
6543 out:
6544         return IRQ_RETVAL(handled);
6545 }
6546
6547 /* ISR for interrupt test */
6548 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6549 {
6550         struct tg3_napi *tnapi = dev_id;
6551         struct tg3 *tp = tnapi->tp;
6552         struct tg3_hw_status *sblk = tnapi->hw_status;
6553
6554         if ((sblk->status & SD_STATUS_UPDATED) ||
6555             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6556                 tg3_disable_ints(tp);
6557                 return IRQ_RETVAL(1);
6558         }
6559         return IRQ_RETVAL(0);
6560 }
6561
6562 #ifdef CONFIG_NET_POLL_CONTROLLER
6563 static void tg3_poll_controller(struct net_device *dev)
6564 {
6565         int i;
6566         struct tg3 *tp = netdev_priv(dev);
6567
6568         for (i = 0; i < tp->irq_cnt; i++)
6569                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6570 }
6571 #endif
6572
6573 static void tg3_tx_timeout(struct net_device *dev)
6574 {
6575         struct tg3 *tp = netdev_priv(dev);
6576
6577         if (netif_msg_tx_err(tp)) {
6578                 netdev_err(dev, "transmit timed out, resetting\n");
6579                 tg3_dump_state(tp);
6580         }
6581
6582         tg3_reset_task_schedule(tp);
6583 }
6584
6585 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
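/* E.g. (illustrative values): base = 0xffffff00, len = 0x200:
 * base + len + 8 wraps to 0x108 < base, so the buffer straddles a 4GB
 * boundary.  0xffffdcc0 is 4GB - 9024, so only mappings within the last
 * ~9KB below a boundary can wrap for jumbo-sized lengths.
 */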
6586 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6587 {
6588         u32 base = (u32) mapping & 0xffffffff;
6589
6590         return (base > 0xffffdcc0) && (base + len + 8 < base);
6591 }
6592
6593 /* Test for DMA addresses > 40-bit */
6594 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6595                                           int len)
6596 {
6597 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6598         if (tg3_flag(tp, 40BIT_DMA_BUG))
6599                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6600         return 0;
6601 #else
6602         return 0;
6603 #endif
6604 }
6605
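/* Fill in one TX buffer descriptor.  The 64-bit DMA address is split
 * across addr_hi/addr_lo; len and flags share one word, as do mss and
 * the VLAN tag.
 */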
6606 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6607                                  dma_addr_t mapping, u32 len, u32 flags,
6608                                  u32 mss, u32 vlan)
6609 {
6610         txbd->addr_hi = ((u64) mapping >> 32);
6611         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6612         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6613         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6614 }
6615
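/* Emit one or more TX BDs for a single DMA segment, starting at *entry
 * and decrementing *budget per BD consumed.  If tp->dma_limit is set,
 * an oversized segment is split; intermediate BDs are marked
 * ->fragmented so the unmap paths can skip them.  E.g. (illustrative):
 * dma_limit = 4096 and len = 4100 would otherwise leave a 4-byte tail
 * (the short-DMA bug), so the split becomes 2048 + 2052 instead.
 * Returns true if a hardware bug workaround is still required.
 */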
6616 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6617                             dma_addr_t map, u32 len, u32 flags,
6618                             u32 mss, u32 vlan)
6619 {
6620         struct tg3 *tp = tnapi->tp;
6621         bool hwbug = false;
6622
6623         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6624                 hwbug = true;
6625
6626         if (tg3_4g_overflow_test(map, len))
6627                 hwbug = true;
6628
6629         if (tg3_40bit_overflow_test(tp, map, len))
6630                 hwbug = true;
6631
6632         if (tp->dma_limit) {
6633                 u32 prvidx = *entry;
6634                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6635                 while (len > tp->dma_limit && *budget) {
6636                         u32 frag_len = tp->dma_limit;
6637                         len -= tp->dma_limit;
6638
6639                         /* Avoid the 8-byte DMA problem */
6640                         if (len <= 8) {
6641                                 len += tp->dma_limit / 2;
6642                                 frag_len = tp->dma_limit / 2;
6643                         }
6644
6645                         tnapi->tx_buffers[*entry].fragmented = true;
6646
6647                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6648                                       frag_len, tmp_flag, mss, vlan);
6649                         *budget -= 1;
6650                         prvidx = *entry;
6651                         *entry = NEXT_TX(*entry);
6652
6653                         map += frag_len;
6654                 }
6655
6656                 if (len) {
6657                         if (*budget) {
6658                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6659                                               len, flags, mss, vlan);
6660                                 *budget -= 1;
6661                                 *entry = NEXT_TX(*entry);
6662                         } else {
6663                                 hwbug = true;
6664                                 tnapi->tx_buffers[prvidx].fragmented = false;
6665                         }
6666                 }
6667         } else {
6668                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6669                               len, flags, mss, vlan);
6670                 *entry = NEXT_TX(*entry);
6671         }
6672
6673         return hwbug;
6674 }
6675
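/* Undo the DMA mappings for one transmitted skb: the linear head
 * first, then each page fragment up to and including 'last', skipping
 * over the extra BDs that tg3_tx_frag_set() marked ->fragmented.  A
 * negative 'last' unmaps the head only.
 */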
6676 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6677 {
6678         int i;
6679         struct sk_buff *skb;
6680         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6681
6682         skb = txb->skb;
6683         txb->skb = NULL;
6684
6685         pci_unmap_single(tnapi->tp->pdev,
6686                          dma_unmap_addr(txb, mapping),
6687                          skb_headlen(skb),
6688                          PCI_DMA_TODEVICE);
6689
6690         while (txb->fragmented) {
6691                 txb->fragmented = false;
6692                 entry = NEXT_TX(entry);
6693                 txb = &tnapi->tx_buffers[entry];
6694         }
6695
6696         for (i = 0; i <= last; i++) {
6697                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6698
6699                 entry = NEXT_TX(entry);
6700                 txb = &tnapi->tx_buffers[entry];
6701
6702                 pci_unmap_page(tnapi->tp->pdev,
6703                                dma_unmap_addr(txb, mapping),
6704                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6705
6706                 while (txb->fragmented) {
6707                         txb->fragmented = false;
6708                         entry = NEXT_TX(entry);
6709                         txb = &tnapi->tx_buffers[entry];
6710                 }
6711         }
6712 }
6713
6714 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6715 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6716                                        struct sk_buff **pskb,
6717                                        u32 *entry, u32 *budget,
6718                                        u32 base_flags, u32 mss, u32 vlan)
6719 {
6720         struct tg3 *tp = tnapi->tp;
6721         struct sk_buff *new_skb, *skb = *pskb;
6722         dma_addr_t new_addr = 0;
6723         int ret = 0;
6724
6725         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6726                 new_skb = skb_copy(skb, GFP_ATOMIC);
6727         else {
6728                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6729
6730                 new_skb = skb_copy_expand(skb,
6731                                           skb_headroom(skb) + more_headroom,
6732                                           skb_tailroom(skb), GFP_ATOMIC);
6733         }
6734
6735         if (!new_skb) {
6736                 ret = -1;
6737         } else {
6738                 /* New SKB is guaranteed to be linear. */
6739                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6740                                           PCI_DMA_TODEVICE);
6741                 /* Make sure the mapping succeeded */
6742                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6743                         dev_kfree_skb(new_skb);
6744                         ret = -1;
6745                 } else {
6746                         u32 save_entry = *entry;
6747
6748                         base_flags |= TXD_FLAG_END;
6749
6750                         tnapi->tx_buffers[*entry].skb = new_skb;
6751                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6752                                            mapping, new_addr);
6753
6754                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6755                                             new_skb->len, base_flags,
6756                                             mss, vlan)) {
6757                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6758                                 dev_kfree_skb(new_skb);
6759                                 ret = -1;
6760                         }
6761                 }
6762         }
6763
6764         dev_kfree_skb(skb);
6765         *pskb = new_skb;
6766         return ret;
6767 }
6768
6769 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6770
6771 /* Use GSO to work around a rare TSO bug that may be triggered when the
6772  * TSO header is greater than 80 bytes.
6773  */
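/* The worst-case estimate of 3 BDs per GSO segment below is a
 * heuristic: roughly one BD for the rebuilt header plus a couple for
 * the payload fragments of each resulting skb.
 */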
6774 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6775 {
6776         struct sk_buff *segs, *nskb;
6777         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6778
6779         /* Estimate the number of fragments in the worst case */
6780         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6781                 netif_stop_queue(tp->dev);
6782
6783                 /* netif_stop_queue() must be done before checking
6784                  * tx index in tg3_tx_avail() below, because in
6785                  * tg3_tx(), we update tx index before checking for
6786                  * netif_tx_queue_stopped().
6787                  */
6788                 smp_mb();
6789                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6790                         return NETDEV_TX_BUSY;
6791
6792                 netif_wake_queue(tp->dev);
6793         }
6794
6795         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6796         if (IS_ERR(segs))
6797                 goto tg3_tso_bug_end;
6798
6799         do {
6800                 nskb = segs;
6801                 segs = segs->next;
6802                 nskb->next = NULL;
6803                 tg3_start_xmit(nskb, tp->dev);
6804         } while (segs);
6805
6806 tg3_tso_bug_end:
6807         dev_kfree_skb(skb);
6808
6809         return NETDEV_TX_OK;
6810 }
6811
6812 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6813  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6814  */
6815 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6816 {
6817         struct tg3 *tp = netdev_priv(dev);
6818         u32 len, entry, base_flags, mss, vlan = 0;
6819         u32 budget;
6820         int i = -1, would_hit_hwbug;
6821         dma_addr_t mapping;
6822         struct tg3_napi *tnapi;
6823         struct netdev_queue *txq;
6824         unsigned int last;
6825
6826         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6827         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6828         if (tg3_flag(tp, ENABLE_TSS))
6829                 tnapi++;
6830
6831         budget = tg3_tx_avail(tnapi);
6832
6833         /* We are running in BH disabled context with netif_tx_lock
6834          * and TX reclaim runs via tp->napi.poll inside of a software
6835          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6836          * no IRQ context deadlocks to worry about either.  Rejoice!
6837          */
6838         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6839                 if (!netif_tx_queue_stopped(txq)) {
6840                         netif_tx_stop_queue(txq);
6841
6842                         /* This is a hard error, log it. */
6843                         netdev_err(dev,
6844                                    "BUG! Tx Ring full when queue awake!\n");
6845                 }
6846                 return NETDEV_TX_BUSY;
6847         }
6848
6849         entry = tnapi->tx_prod;
6850         base_flags = 0;
6851         if (skb->ip_summed == CHECKSUM_PARTIAL)
6852                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6853
6854         mss = skb_shinfo(skb)->gso_size;
6855         if (mss) {
6856                 struct iphdr *iph;
6857                 u32 tcp_opt_len, hdr_len;
6858
6859                 if (skb_header_cloned(skb) &&
6860                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6861                         goto drop;
6862
6863                 iph = ip_hdr(skb);
6864                 tcp_opt_len = tcp_optlen(skb);
6865
6866                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6867
6868                 if (!skb_is_gso_v6(skb)) {
6869                         iph->check = 0;
6870                         iph->tot_len = htons(mss + hdr_len);
6871                 }
6872
6873                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6874                     tg3_flag(tp, TSO_BUG))
6875                         return tg3_tso_bug(tp, skb);
6876
6877                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6878                                TXD_FLAG_CPU_POST_DMA);
6879
6880                 if (tg3_flag(tp, HW_TSO_1) ||
6881                     tg3_flag(tp, HW_TSO_2) ||
6882                     tg3_flag(tp, HW_TSO_3)) {
6883                         tcp_hdr(skb)->check = 0;
6884                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6885                 } else
6886                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6887                                                                  iph->daddr, 0,
6888                                                                  IPPROTO_TCP,
6889                                                                  0);
6890
6891                 if (tg3_flag(tp, HW_TSO_3)) {
6892                         mss |= (hdr_len & 0xc) << 12;
6893                         if (hdr_len & 0x10)
6894                                 base_flags |= 0x00000010;
6895                         base_flags |= (hdr_len & 0x3e0) << 5;
6896                 } else if (tg3_flag(tp, HW_TSO_2))
6897                         mss |= hdr_len << 9;
6898                 else if (tg3_flag(tp, HW_TSO_1) ||
6899                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6900                         if (tcp_opt_len || iph->ihl > 5) {
6901                                 int tsflags;
6902
6903                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6904                                 mss |= (tsflags << 11);
6905                         }
6906                 } else {
6907                         if (tcp_opt_len || iph->ihl > 5) {
6908                                 int tsflags;
6909
6910                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6911                                 base_flags |= tsflags << 12;
6912                         }
6913                 }
6914         }
6915
6916         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6917             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6918                 base_flags |= TXD_FLAG_JMB_PKT;
6919
6920         if (vlan_tx_tag_present(skb)) {
6921                 base_flags |= TXD_FLAG_VLAN;
6922                 vlan = vlan_tx_tag_get(skb);
6923         }
6924
6925         len = skb_headlen(skb);
6926
6927         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6928         if (pci_dma_mapping_error(tp->pdev, mapping))
6929                 goto drop;
6930
6931
6932         tnapi->tx_buffers[entry].skb = skb;
6933         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6934
6935         would_hit_hwbug = 0;
6936
6937         if (tg3_flag(tp, 5701_DMA_BUG))
6938                 would_hit_hwbug = 1;
6939
6940         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6941                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6942                             mss, vlan)) {
6943                 would_hit_hwbug = 1;
6944         } else if (skb_shinfo(skb)->nr_frags > 0) {
6945                 u32 tmp_mss = mss;
6946
6947                 if (!tg3_flag(tp, HW_TSO_1) &&
6948                     !tg3_flag(tp, HW_TSO_2) &&
6949                     !tg3_flag(tp, HW_TSO_3))
6950                         tmp_mss = 0;
6951
6952                 /* Now loop through additional data
6953                  * fragments, and queue them.
6954                  */
6955                 last = skb_shinfo(skb)->nr_frags - 1;
6956                 for (i = 0; i <= last; i++) {
6957                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6958
6959                         len = skb_frag_size(frag);
6960                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6961                                                    len, DMA_TO_DEVICE);
6962
6963                         tnapi->tx_buffers[entry].skb = NULL;
6964                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6965                                            mapping);
6966                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6967                                 goto dma_error;
6968
6969                         if (!budget ||
6970                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6971                                             len, base_flags |
6972                                             ((i == last) ? TXD_FLAG_END : 0),
6973                                             tmp_mss, vlan)) {
6974                                 would_hit_hwbug = 1;
6975                                 break;
6976                         }
6977                 }
6978         }
6979
6980         if (would_hit_hwbug) {
6981                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6982
6983                 /* If the workaround fails due to memory/mapping
6984                  * failure, silently drop this packet.
6985                  */
6986                 entry = tnapi->tx_prod;
6987                 budget = tg3_tx_avail(tnapi);
6988                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6989                                                 base_flags, mss, vlan))
6990                         goto drop_nofree;
6991         }
6992
6993         skb_tx_timestamp(skb);
6994         netdev_tx_sent_queue(txq, skb->len);
6995
6996         /* Sync BD data before updating mailbox */
6997         wmb();
6998
6999         /* Packets are ready, update Tx producer idx local and on card. */
7000         tw32_tx_mbox(tnapi->prodmbox, entry);
7001
7002         tnapi->tx_prod = entry;
7003         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7004                 netif_tx_stop_queue(txq);
7005
7006                 /* netif_tx_stop_queue() must be done before checking
7007                  * tx index in tg3_tx_avail() below, because in
7008                  * tg3_tx(), we update tx index before checking for
7009                  * netif_tx_queue_stopped().
7010                  */
7011                 smp_mb();
7012                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7013                         netif_tx_wake_queue(txq);
7014         }
7015
7016         mmiowb();
7017         return NETDEV_TX_OK;
7018
7019 dma_error:
7020         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7021         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7022 drop:
7023         dev_kfree_skb(skb);
7024 drop_nofree:
7025         tp->tx_dropped++;
7026         return NETDEV_TX_OK;
7027 }
7028
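/* Enable or disable internal MAC loopback (TX routed back to RX inside
 * the MAC), forcing the port mode to MII or GMII depending on whether
 * the PHY is 10/100-only.  The loopback self-test code elsewhere in the
 * driver is the expected caller.
 */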
7029 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7030 {
7031         if (enable) {
7032                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7033                                   MAC_MODE_PORT_MODE_MASK);
7034
7035                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7036
7037                 if (!tg3_flag(tp, 5705_PLUS))
7038                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7039
7040                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7041                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7042                 else
7043                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7044         } else {
7045                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7046
7047                 if (tg3_flag(tp, 5705_PLUS) ||
7048                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7049                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7050                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7051         }
7052
7053         tw32(MAC_MODE, tp->mac_mode);
7054         udelay(40);
7055 }
7056
7057 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7058 {
7059         u32 val, bmcr, mac_mode, ptest = 0;
7060
7061         tg3_phy_toggle_apd(tp, false);
7062         tg3_phy_toggle_automdix(tp, 0);
7063
7064         if (extlpbk && tg3_phy_set_extloopbk(tp))
7065                 return -EIO;
7066
7067         bmcr = BMCR_FULLDPLX;
7068         switch (speed) {
7069         case SPEED_10:
7070                 break;
7071         case SPEED_100:
7072                 bmcr |= BMCR_SPEED100;
7073                 break;
7074         case SPEED_1000:
7075         default:
7076                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7077                         speed = SPEED_100;
7078                         bmcr |= BMCR_SPEED100;
7079                 } else {
7080                         speed = SPEED_1000;
7081                         bmcr |= BMCR_SPEED1000;
7082                 }
7083         }
7084
7085         if (extlpbk) {
7086                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7087                         tg3_readphy(tp, MII_CTRL1000, &val);
7088                         val |= CTL1000_AS_MASTER |
7089                                CTL1000_ENABLE_MASTER;
7090                         tg3_writephy(tp, MII_CTRL1000, val);
7091                 } else {
7092                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7093                                 MII_TG3_FET_PTEST_TRIM_2;
7094                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7095                 }
7096         } else
7097                 bmcr |= BMCR_LOOPBACK;
7098
7099         tg3_writephy(tp, MII_BMCR, bmcr);
7100
7101         /* The write needs to be flushed for the FETs */
7102         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7103                 tg3_readphy(tp, MII_BMCR, &bmcr);
7104
7105         udelay(40);
7106
7107         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7108             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7109                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7110                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7111                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7112
7113                 /* The write needs to be flushed for the AC131 */
7114                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7115         }
7116
7117         /* Reset to prevent losing 1st rx packet intermittently */
7118         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7119             tg3_flag(tp, 5780_CLASS)) {
7120                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7121                 udelay(10);
7122                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7123         }
7124
7125         mac_mode = tp->mac_mode &
7126                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7127         if (speed == SPEED_1000)
7128                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7129         else
7130                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7131
7132         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7133                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7134
7135                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7136                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7137                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7138                         mac_mode |= MAC_MODE_LINK_POLARITY;
7139
7140                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7141                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7142         }
7143
7144         tw32(MAC_MODE, mac_mode);
7145         udelay(40);
7146
7147         return 0;
7148 }
7149
7150 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7151 {
7152         struct tg3 *tp = netdev_priv(dev);
7153
7154         if (features & NETIF_F_LOOPBACK) {
7155                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7156                         return;
7157
7158                 spin_lock_bh(&tp->lock);
7159                 tg3_mac_loopback(tp, true);
7160                 netif_carrier_on(tp->dev);
7161                 spin_unlock_bh(&tp->lock);
7162                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7163         } else {
7164                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7165                         return;
7166
7167                 spin_lock_bh(&tp->lock);
7168                 tg3_mac_loopback(tp, false);
7169                 /* Force link status check */
7170                 tg3_setup_phy(tp, 1);
7171                 spin_unlock_bh(&tp->lock);
7172                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7173         }
7174 }
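
/* Editorial note: NETIF_F_LOOPBACK is driven from userspace through the
 * netdev features interface; assuming the standard ethtool feature name
 * for this flag, something like:
 *
 *	ethtool -K eth0 loopback on
 *
 * reaches tg3_set_loopback() via tg3_set_features() once the flag
 * actually changes while the interface is running.
 */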
7175
7176 static netdev_features_t tg3_fix_features(struct net_device *dev,
7177         netdev_features_t features)
7178 {
7179         struct tg3 *tp = netdev_priv(dev);
7180
7181         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7182                 features &= ~NETIF_F_ALL_TSO;
7183
7184         return features;
7185 }
7186
7187 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7188 {
7189         netdev_features_t changed = dev->features ^ features;
7190
7191         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7192                 tg3_set_loopback(dev, features);
7193
7194         return 0;
7195 }
7196
7197 static void tg3_rx_prodring_free(struct tg3 *tp,
7198                                  struct tg3_rx_prodring_set *tpr)
7199 {
7200         int i;
7201
7202         if (tpr != &tp->napi[0].prodring) {
7203                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7204                      i = (i + 1) & tp->rx_std_ring_mask)
7205                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7206                                         tp->rx_pkt_map_sz);
7207
7208                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7209                         for (i = tpr->rx_jmb_cons_idx;
7210                              i != tpr->rx_jmb_prod_idx;
7211                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7212                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7213                                                 TG3_RX_JMB_MAP_SZ);
7214                         }
7215                 }
7216
7217                 return;
7218         }
7219
7220         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7221                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7222                                 tp->rx_pkt_map_sz);
7223
7224         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7225                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7226                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7227                                         TG3_RX_JMB_MAP_SZ);
7228         }
7229 }
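
/* Editorial note: the index arithmetic above relies on the ring sizes
 * being powers of two: the mask is (size - 1), so "(i + 1) & mask" wraps
 * the index without a modulo.  A minimal illustration with an assumed
 * ring size:
 *
 *	u32 mask = 512 - 1;	// 512-entry ring
 *	u32 i = 511;
 *	i = (i + 1) & mask;	// i wraps to 0
 */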
7230
7231 /* Initialize rx rings for packet processing.
7232  *
7233  * The chip has been shut down and the driver detached from
7234  * the networking stack, so no interrupts or new tx packets will
7235  * end up in the driver.  tp->{tx,}lock are held and thus
7236  * we may not sleep.
7237  */
7238 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7239                                  struct tg3_rx_prodring_set *tpr)
7240 {
7241         u32 i, rx_pkt_dma_sz;
7242
7243         tpr->rx_std_cons_idx = 0;
7244         tpr->rx_std_prod_idx = 0;
7245         tpr->rx_jmb_cons_idx = 0;
7246         tpr->rx_jmb_prod_idx = 0;
7247
7248         if (tpr != &tp->napi[0].prodring) {
7249                 memset(&tpr->rx_std_buffers[0], 0,
7250                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7251                 if (tpr->rx_jmb_buffers)
7252                         memset(&tpr->rx_jmb_buffers[0], 0,
7253                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7254                 goto done;
7255         }
7256
7257         /* Zero out all descriptors. */
7258         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7259
7260         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7261         if (tg3_flag(tp, 5780_CLASS) &&
7262             tp->dev->mtu > ETH_DATA_LEN)
7263                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7264         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7265
7266         /* Initialize invariants of the rings; we only set this
7267          * stuff once.  This works because the card does not
7268          * write into the rx buffer posting rings.
7269          */
7270         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7271                 struct tg3_rx_buffer_desc *rxd;
7272
7273                 rxd = &tpr->rx_std[i];
7274                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7275                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7276                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7277                                (i << RXD_OPAQUE_INDEX_SHIFT));
7278         }
7279
7280         /* Now allocate fresh SKBs for each rx ring. */
7281         for (i = 0; i < tp->rx_pending; i++) {
7282                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7283                         netdev_warn(tp->dev,
7284                                     "Using a smaller RX standard ring. Only "
7285                                     "%d out of %d buffers were allocated "
7286                                     "successfully\n", i, tp->rx_pending);
7287                         if (i == 0)
7288                                 goto initfail;
7289                         tp->rx_pending = i;
7290                         break;
7291                 }
7292         }
7293
7294         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7295                 goto done;
7296
7297         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7298
7299         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7300                 goto done;
7301
7302         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7303                 struct tg3_rx_buffer_desc *rxd;
7304
7305                 rxd = &tpr->rx_jmb[i].std;
7306                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7307                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7308                                   RXD_FLAG_JUMBO;
7309                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7310                        (i << RXD_OPAQUE_INDEX_SHIFT));
7311         }
7312
7313         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7314                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7315                         netdev_warn(tp->dev,
7316                                     "Using a smaller RX jumbo ring. Only %d "
7317                                     "out of %d buffers were allocated "
7318                                     "successfully\n", i, tp->rx_jumbo_pending);
7319                         if (i == 0)
7320                                 goto initfail;
7321                         tp->rx_jumbo_pending = i;
7322                         break;
7323                 }
7324         }
7325
7326 done:
7327         return 0;
7328
7329 initfail:
7330         tg3_rx_prodring_free(tp, tpr);
7331         return -ENOMEM;
7332 }
7333
7334 static void tg3_rx_prodring_fini(struct tg3 *tp,
7335                                  struct tg3_rx_prodring_set *tpr)
7336 {
7337         kfree(tpr->rx_std_buffers);
7338         tpr->rx_std_buffers = NULL;
7339         kfree(tpr->rx_jmb_buffers);
7340         tpr->rx_jmb_buffers = NULL;
7341         if (tpr->rx_std) {
7342                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7343                                   tpr->rx_std, tpr->rx_std_mapping);
7344                 tpr->rx_std = NULL;
7345         }
7346         if (tpr->rx_jmb) {
7347                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7348                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7349                 tpr->rx_jmb = NULL;
7350         }
7351 }
7352
7353 static int tg3_rx_prodring_init(struct tg3 *tp,
7354                                 struct tg3_rx_prodring_set *tpr)
7355 {
7356         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7357                                       GFP_KERNEL);
7358         if (!tpr->rx_std_buffers)
7359                 return -ENOMEM;
7360
7361         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7362                                          TG3_RX_STD_RING_BYTES(tp),
7363                                          &tpr->rx_std_mapping,
7364                                          GFP_KERNEL);
7365         if (!tpr->rx_std)
7366                 goto err_out;
7367
7368         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7369                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7370                                               GFP_KERNEL);
7371                 if (!tpr->rx_jmb_buffers)
7372                         goto err_out;
7373
7374                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7375                                                  TG3_RX_JMB_RING_BYTES(tp),
7376                                                  &tpr->rx_jmb_mapping,
7377                                                  GFP_KERNEL);
7378                 if (!tpr->rx_jmb)
7379                         goto err_out;
7380         }
7381
7382         return 0;
7383
7384 err_out:
7385         tg3_rx_prodring_fini(tp, tpr);
7386         return -ENOMEM;
7387 }
7388
7389 /* Free up pending packets in all rx/tx rings.
7390  *
7391  * The chip has been shut down and the driver detached from
7392  * the networking stack, so no interrupts or new tx packets will
7393  * end up in the driver.  tp->{tx,}lock is not held and we are not
7394  * in an interrupt context and thus may sleep.
7395  */
7396 static void tg3_free_rings(struct tg3 *tp)
7397 {
7398         int i, j;
7399
7400         for (j = 0; j < tp->irq_cnt; j++) {
7401                 struct tg3_napi *tnapi = &tp->napi[j];
7402
7403                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7404
7405                 if (!tnapi->tx_buffers)
7406                         continue;
7407
7408                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7409                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7410
7411                         if (!skb)
7412                                 continue;
7413
7414                         tg3_tx_skb_unmap(tnapi, i,
7415                                          skb_shinfo(skb)->nr_frags - 1);
7416
7417                         dev_kfree_skb_any(skb);
7418                 }
7419                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7420         }
7421 }
7422
7423 /* Initialize tx/rx rings for packet processing.
7424  *
7425  * The chip has been shut down and the driver detached from
7426  * the networking stack, so no interrupts or new tx packets will
7427  * end up in the driver.  tp->{tx,}lock are held and thus
7428  * we may not sleep.
7429  */
7430 static int tg3_init_rings(struct tg3 *tp)
7431 {
7432         int i;
7433
7434         /* Free up all the SKBs. */
7435         tg3_free_rings(tp);
7436
7437         for (i = 0; i < tp->irq_cnt; i++) {
7438                 struct tg3_napi *tnapi = &tp->napi[i];
7439
7440                 tnapi->last_tag = 0;
7441                 tnapi->last_irq_tag = 0;
7442                 tnapi->hw_status->status = 0;
7443                 tnapi->hw_status->status_tag = 0;
7444                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7445
7446                 tnapi->tx_prod = 0;
7447                 tnapi->tx_cons = 0;
7448                 if (tnapi->tx_ring)
7449                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7450
7451                 tnapi->rx_rcb_ptr = 0;
7452                 if (tnapi->rx_rcb)
7453                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7454
7455                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7456                         tg3_free_rings(tp);
7457                         return -ENOMEM;
7458                 }
7459         }
7460
7461         return 0;
7462 }
7463
7464 /*
7465  * Must not be invoked with interrupt sources disabled and
7466  * the hardware shut down.
7467  */
7468 static void tg3_free_consistent(struct tg3 *tp)
7469 {
7470         int i;
7471
7472         for (i = 0; i < tp->irq_cnt; i++) {
7473                 struct tg3_napi *tnapi = &tp->napi[i];
7474
7475                 if (tnapi->tx_ring) {
7476                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7477                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7478                         tnapi->tx_ring = NULL;
7479                 }
7480
7481                 kfree(tnapi->tx_buffers);
7482                 tnapi->tx_buffers = NULL;
7483
7484                 if (tnapi->rx_rcb) {
7485                         dma_free_coherent(&tp->pdev->dev,
7486                                           TG3_RX_RCB_RING_BYTES(tp),
7487                                           tnapi->rx_rcb,
7488                                           tnapi->rx_rcb_mapping);
7489                         tnapi->rx_rcb = NULL;
7490                 }
7491
7492                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7493
7494                 if (tnapi->hw_status) {
7495                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7496                                           tnapi->hw_status,
7497                                           tnapi->status_mapping);
7498                         tnapi->hw_status = NULL;
7499                 }
7500         }
7501
7502         if (tp->hw_stats) {
7503                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7504                                   tp->hw_stats, tp->stats_mapping);
7505                 tp->hw_stats = NULL;
7506         }
7507 }
7508
7509 /*
7510  * Must not be invoked with interrupt sources disabled and
7511  * the hardware shut down.  Can sleep.
7512  */
7513 static int tg3_alloc_consistent(struct tg3 *tp)
7514 {
7515         int i;
7516
7517         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7518                                           sizeof(struct tg3_hw_stats),
7519                                           &tp->stats_mapping,
7520                                           GFP_KERNEL);
7521         if (!tp->hw_stats)
7522                 goto err_out;
7523
7524         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7525
7526         for (i = 0; i < tp->irq_cnt; i++) {
7527                 struct tg3_napi *tnapi = &tp->napi[i];
7528                 struct tg3_hw_status *sblk;
7529
7530                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7531                                                       TG3_HW_STATUS_SIZE,
7532                                                       &tnapi->status_mapping,
7533                                                       GFP_KERNEL);
7534                 if (!tnapi->hw_status)
7535                         goto err_out;
7536
7537                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7538                 sblk = tnapi->hw_status;
7539
7540                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7541                         goto err_out;
7542
7543                 /* If multivector TSS is enabled, vector 0 does not handle
7544                  * tx interrupts.  Don't allocate any resources for it.
7545                  */
7546                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7547                     (i && tg3_flag(tp, ENABLE_TSS))) {
7548                         tnapi->tx_buffers = kzalloc(
7549                                                sizeof(struct tg3_tx_ring_info) *
7550                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7551                         if (!tnapi->tx_buffers)
7552                                 goto err_out;
7553
7554                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7555                                                             TG3_TX_RING_BYTES,
7556                                                         &tnapi->tx_desc_mapping,
7557                                                             GFP_KERNEL);
7558                         if (!tnapi->tx_ring)
7559                                 goto err_out;
7560                 }
7561
7562                 /*
7563                  * When RSS is enabled, the status block format changes
7564                  * slightly.  The "rx_jumbo_consumer", "reserved",
7565                  * and "rx_mini_consumer" members get mapped to the
7566                  * other three rx return ring producer indexes.
7567                  */
7568                 switch (i) {
7569                 default:
7570                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7571                         break;
7572                 case 2:
7573                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7574                         break;
7575                 case 3:
7576                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7577                         break;
7578                 case 4:
7579                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7580                         break;
7581                 }
7582
7583                 /*
7584                  * If multivector RSS is enabled, vector 0 does not handle
7585                  * rx or tx interrupts.  Don't allocate any resources for it.
7586                  */
7587                 if (!i && tg3_flag(tp, ENABLE_RSS))
7588                         continue;
7589
7590                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7591                                                    TG3_RX_RCB_RING_BYTES(tp),
7592                                                    &tnapi->rx_rcb_mapping,
7593                                                    GFP_KERNEL);
7594                 if (!tnapi->rx_rcb)
7595                         goto err_out;
7596
7597                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7598         }
7599
7600         return 0;
7601
7602 err_out:
7603         tg3_free_consistent(tp);
7604         return -ENOMEM;
7605 }
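
/* Editorial note: a summary of the RSS status-block remapping performed
 * in the switch in the allocator above (derived from the code; vector
 * numbers are the tnapi indexes):
 *
 *	vectors 0-1 -> sblk->idx[0].rx_producer  (default case)
 *	vector  2   -> sblk->rx_jumbo_consumer
 *	vector  3   -> sblk->reserved
 *	vector  4   -> sblk->rx_mini_consumer
 *
 * i.e. the jumbo, reserved and mini fields double as the extra rx return
 * ring producer indexes when RSS is enabled.
 */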
7606
7607 #define MAX_WAIT_CNT 1000
7608
7609 /* To stop a block, clear the enable bit and poll till it
7610  * clears.  tp->lock is held.
7611  */
7612 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7613 {
7614         unsigned int i;
7615         u32 val;
7616
7617         if (tg3_flag(tp, 5705_PLUS)) {
7618                 switch (ofs) {
7619                 case RCVLSC_MODE:
7620                 case DMAC_MODE:
7621                 case MBFREE_MODE:
7622                 case BUFMGR_MODE:
7623                 case MEMARB_MODE:
7624                         /* We can't enable/disable these bits of the
7625                          * 5705/5750, just say success.
7626                          */
7627                         return 0;
7628
7629                 default:
7630                         break;
7631                 }
7632         }
7633
7634         val = tr32(ofs);
7635         val &= ~enable_bit;
7636         tw32_f(ofs, val);
7637
7638         for (i = 0; i < MAX_WAIT_CNT; i++) {
7639                 udelay(100);
7640                 val = tr32(ofs);
7641                 if ((val & enable_bit) == 0)
7642                         break;
7643         }
7644
7645         if (i == MAX_WAIT_CNT && !silent) {
7646                 dev_err(&tp->pdev->dev,
7647                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7648                         ofs, enable_bit);
7649                 return -ENODEV;
7650         }
7651
7652         return 0;
7653 }
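
/* Editorial note: a minimal usage sketch of the helper above; silent = 0
 * reports a timeout via dev_err() and returns -ENODEV after
 * MAX_WAIT_CNT * 100us (roughly 100ms) of polling:
 *
 *	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, 0);
 *	if (err)
 *		;	// engine failed to stop; caller decides how to react
 *
 * tg3_abort_hw() below is the canonical caller, OR-ing the per-block
 * results so any single timeout fails the whole shutdown.
 */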
7654
7655 /* tp->lock is held. */
7656 static int tg3_abort_hw(struct tg3 *tp, int silent)
7657 {
7658         int i, err;
7659
7660         tg3_disable_ints(tp);
7661
7662         tp->rx_mode &= ~RX_MODE_ENABLE;
7663         tw32_f(MAC_RX_MODE, tp->rx_mode);
7664         udelay(10);
7665
7666         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7667         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7668         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7669         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7670         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7671         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7672
7673         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7674         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7675         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7676         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7677         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7678         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7679         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7680
7681         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7682         tw32_f(MAC_MODE, tp->mac_mode);
7683         udelay(40);
7684
7685         tp->tx_mode &= ~TX_MODE_ENABLE;
7686         tw32_f(MAC_TX_MODE, tp->tx_mode);
7687
7688         for (i = 0; i < MAX_WAIT_CNT; i++) {
7689                 udelay(100);
7690                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7691                         break;
7692         }
7693         if (i >= MAX_WAIT_CNT) {
7694                 dev_err(&tp->pdev->dev,
7695                         "%s timed out, TX_MODE_ENABLE will not clear "
7696                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7697                 err |= -ENODEV;
7698         }
7699
7700         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7701         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7702         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7703
7704         tw32(FTQ_RESET, 0xffffffff);
7705         tw32(FTQ_RESET, 0x00000000);
7706
7707         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7708         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7709
7710         for (i = 0; i < tp->irq_cnt; i++) {
7711                 struct tg3_napi *tnapi = &tp->napi[i];
7712                 if (tnapi->hw_status)
7713                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7714         }
7715
7716         return err;
7717 }
7718
7719 /* Save PCI command register before chip reset */
7720 static void tg3_save_pci_state(struct tg3 *tp)
7721 {
7722         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7723 }
7724
7725 /* Restore PCI state after chip reset */
7726 static void tg3_restore_pci_state(struct tg3 *tp)
7727 {
7728         u32 val;
7729
7730         /* Re-enable indirect register accesses. */
7731         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7732                                tp->misc_host_ctrl);
7733
7734         /* Set MAX PCI retry to zero. */
7735         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7736         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7737             tg3_flag(tp, PCIX_MODE))
7738                 val |= PCISTATE_RETRY_SAME_DMA;
7739         /* Allow reads and writes to the APE register and memory space. */
7740         if (tg3_flag(tp, ENABLE_APE))
7741                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7742                        PCISTATE_ALLOW_APE_SHMEM_WR |
7743                        PCISTATE_ALLOW_APE_PSPACE_WR;
7744         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7745
7746         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7747
7748         if (!tg3_flag(tp, PCI_EXPRESS)) {
7749                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7750                                       tp->pci_cacheline_sz);
7751                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7752                                       tp->pci_lat_timer);
7753         }
7754
7755         /* Make sure PCI-X relaxed ordering bit is clear. */
7756         if (tg3_flag(tp, PCIX_MODE)) {
7757                 u16 pcix_cmd;
7758
7759                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7760                                      &pcix_cmd);
7761                 pcix_cmd &= ~PCI_X_CMD_ERO;
7762                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7763                                       pcix_cmd);
7764         }
7765
7766         if (tg3_flag(tp, 5780_CLASS)) {
7767
7768                 /* Chip reset on 5780 will reset MSI enable bit,
7769                  * so need to restore it.
7770                  */
7771                 if (tg3_flag(tp, USING_MSI)) {
7772                         u16 ctrl;
7773
7774                         pci_read_config_word(tp->pdev,
7775                                              tp->msi_cap + PCI_MSI_FLAGS,
7776                                              &ctrl);
7777                         pci_write_config_word(tp->pdev,
7778                                               tp->msi_cap + PCI_MSI_FLAGS,
7779                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7780                         val = tr32(MSGINT_MODE);
7781                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7782                 }
7783         }
7784 }
7785
7786 /* tp->lock is held. */
7787 static int tg3_chip_reset(struct tg3 *tp)
7788 {
7789         u32 val;
7790         void (*write_op)(struct tg3 *, u32, u32);
7791         int i, err;
7792
7793         tg3_nvram_lock(tp);
7794
7795         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7796
7797         /* No matching tg3_nvram_unlock() after this because
7798          * chip reset below will undo the nvram lock.
7799          */
7800         tp->nvram_lock_cnt = 0;
7801
7802         /* GRC_MISC_CFG core clock reset will clear the memory
7803          * enable bit in PCI register 4 and the MSI enable bit
7804          * on some chips, so we save relevant registers here.
7805          */
7806         tg3_save_pci_state(tp);
7807
7808         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7809             tg3_flag(tp, 5755_PLUS))
7810                 tw32(GRC_FASTBOOT_PC, 0);
7811
7812         /*
7813          * We must avoid the readl() that normally takes place.
7814          * It locks machines, causes machine checks, and other
7815          * fun things.  So, temporarily disable the 5701
7816          * hardware workaround, while we do the reset.
7817          */
7818         write_op = tp->write32;
7819         if (write_op == tg3_write_flush_reg32)
7820                 tp->write32 = tg3_write32;
7821
7822         /* Prevent the irq handler from reading or writing PCI registers
7823          * during chip reset when the memory enable bit in the PCI command
7824          * register may be cleared.  The chip does not generate interrupts
7825          * at this time, but the irq handler may still be called due to irq
7826          * sharing or irqpoll.
7827          */
7828         tg3_flag_set(tp, CHIP_RESETTING);
7829         for (i = 0; i < tp->irq_cnt; i++) {
7830                 struct tg3_napi *tnapi = &tp->napi[i];
7831                 if (tnapi->hw_status) {
7832                         tnapi->hw_status->status = 0;
7833                         tnapi->hw_status->status_tag = 0;
7834                 }
7835                 tnapi->last_tag = 0;
7836                 tnapi->last_irq_tag = 0;
7837         }
7838         smp_mb();
7839
7840         for (i = 0; i < tp->irq_cnt; i++)
7841                 synchronize_irq(tp->napi[i].irq_vec);
7842
7843         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7844                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7845                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7846         }
7847
7848         /* do the reset */
7849         val = GRC_MISC_CFG_CORECLK_RESET;
7850
7851         if (tg3_flag(tp, PCI_EXPRESS)) {
7852                 /* Force PCIe 1.0a mode */
7853                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7854                     !tg3_flag(tp, 57765_PLUS) &&
7855                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7856                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7857                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7858
7859                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7860                         tw32(GRC_MISC_CFG, (1 << 29));
7861                         val |= (1 << 29);
7862                 }
7863         }
7864
7865         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7866                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7867                 tw32(GRC_VCPU_EXT_CTRL,
7868                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7869         }
7870
7871         /* Manage gphy power for all CPMU-absent PCIe devices. */
7872         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7873                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7874
7875         tw32(GRC_MISC_CFG, val);
7876
7877         /* restore 5701 hardware bug workaround write method */
7878         tp->write32 = write_op;
7879
7880         /* Unfortunately, we have to delay before the PCI read back.
7881          * Some 575X chips will not even respond to a PCI cfg access
7882          * when the reset command is given to the chip.
7883          *
7884          * How do these hardware designers expect things to work
7885          * properly if the PCI write is posted for a long period
7886          * of time?  It is always necessary to have some method by
7887          * which a register read back can occur to push out the
7888          * write that does the reset.
7889          *
7890          * For most tg3 variants the trick below was working.
7891          * Ho hum...
7892          */
7893         udelay(120);
7894
7895         /* Flush PCI posted writes.  The normal MMIO registers
7896          * are inaccessible at this time so this is the only
7897          * way to do this reliably (actually, this is no longer
7898          * the case, see above).  I tried to use indirect
7899          * register read/write but this upset some 5701 variants.
7900          */
7901         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7902
7903         udelay(120);
7904
7905         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7906                 u16 val16;
7907
7908                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7909                         int i;
7910                         u32 cfg_val;
7911
7912                         /* Wait for link training to complete.  */
7913                         for (i = 0; i < 5000; i++)
7914                                 udelay(100);
7915
7916                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7917                         pci_write_config_dword(tp->pdev, 0xc4,
7918                                                cfg_val | (1 << 15));
7919                 }
7920
7921                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7922                 pci_read_config_word(tp->pdev,
7923                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7924                                      &val16);
7925                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7926                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7927                 /*
7928                  * Older PCIe devices only support the 128 byte
7929                  * MPS setting.  Enforce the restriction.
7930                  */
7931                 if (!tg3_flag(tp, CPMU_PRESENT))
7932                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7933                 pci_write_config_word(tp->pdev,
7934                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7935                                       val16);
7936
7937                 /* Clear error status */
7938                 pci_write_config_word(tp->pdev,
7939                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7940                                       PCI_EXP_DEVSTA_CED |
7941                                       PCI_EXP_DEVSTA_NFED |
7942                                       PCI_EXP_DEVSTA_FED |
7943                                       PCI_EXP_DEVSTA_URD);
7944         }
7945
7946         tg3_restore_pci_state(tp);
7947
7948         tg3_flag_clear(tp, CHIP_RESETTING);
7949         tg3_flag_clear(tp, ERROR_PROCESSED);
7950
7951         val = 0;
7952         if (tg3_flag(tp, 5780_CLASS))
7953                 val = tr32(MEMARB_MODE);
7954         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7955
7956         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7957                 tg3_stop_fw(tp);
7958                 tw32(0x5000, 0x400);
7959         }
7960
7961         tw32(GRC_MODE, tp->grc_mode);
7962
7963         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7964                 val = tr32(0xc4);
7965
7966                 tw32(0xc4, val | (1 << 15));
7967         }
7968
7969         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7970             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7971                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7972                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7973                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7974                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7975         }
7976
7977         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7978                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7979                 val = tp->mac_mode;
7980         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7981                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7982                 val = tp->mac_mode;
7983         } else
7984                 val = 0;
7985
7986         tw32_f(MAC_MODE, val);
7987         udelay(40);
7988
7989         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7990
7991         err = tg3_poll_fw(tp);
7992         if (err)
7993                 return err;
7994
7995         tg3_mdio_start(tp);
7996
7997         if (tg3_flag(tp, PCI_EXPRESS) &&
7998             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7999             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8000             !tg3_flag(tp, 57765_PLUS)) {
8001                 val = tr32(0x7c00);
8002
8003                 tw32(0x7c00, val | (1 << 25));
8004         }
8005
8006         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8007                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8008                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8009         }
8010
8011         /* Reprobe ASF enable state.  */
8012         tg3_flag_clear(tp, ENABLE_ASF);
8013         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8014         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8015         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8016                 u32 nic_cfg;
8017
8018                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8019                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8020                         tg3_flag_set(tp, ENABLE_ASF);
8021                         tp->last_event_jiffies = jiffies;
8022                         if (tg3_flag(tp, 5750_PLUS))
8023                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8024                 }
8025         }
8026
8027         return 0;
8028 }
8029
8030 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8031 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8032
8033 /* tp->lock is held. */
8034 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8035 {
8036         int err;
8037
8038         tg3_stop_fw(tp);
8039
8040         tg3_write_sig_pre_reset(tp, kind);
8041
8042         tg3_abort_hw(tp, silent);
8043         err = tg3_chip_reset(tp);
8044
8045         __tg3_set_mac_addr(tp, 0);
8046
8047         tg3_write_sig_legacy(tp, kind);
8048         tg3_write_sig_post_reset(tp, kind);
8049
8050         if (tp->hw_stats) {
8051                 /* Save the stats across chip resets... */
8052                 tg3_get_nstats(tp, &tp->net_stats_prev);
8053                 tg3_get_estats(tp, &tp->estats_prev);
8054
8055                 /* And make sure the next sample is new data */
8056                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8057         }
8058
8059         if (err)
8060                 return err;
8061
8062         return 0;
8063 }
8064
8065 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8066 {
8067         struct tg3 *tp = netdev_priv(dev);
8068         struct sockaddr *addr = p;
8069         int err = 0, skip_mac_1 = 0;
8070
8071         if (!is_valid_ether_addr(addr->sa_data))
8072                 return -EADDRNOTAVAIL;
8073
8074         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8075
8076         if (!netif_running(dev))
8077                 return 0;
8078
8079         if (tg3_flag(tp, ENABLE_ASF)) {
8080                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8081
8082                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8083                 addr0_low = tr32(MAC_ADDR_0_LOW);
8084                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8085                 addr1_low = tr32(MAC_ADDR_1_LOW);
8086
8087                 /* Skip MAC addr 1 if ASF is using it. */
8088                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8089                     !(addr1_high == 0 && addr1_low == 0))
8090                         skip_mac_1 = 1;
8091         }
8092         spin_lock_bh(&tp->lock);
8093         __tg3_set_mac_addr(tp, skip_mac_1);
8094         spin_unlock_bh(&tp->lock);
8095
8096         return err;
8097 }
8098
8099 /* tp->lock is held. */
8100 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8101                            dma_addr_t mapping, u32 maxlen_flags,
8102                            u32 nic_addr)
8103 {
8104         tg3_write_mem(tp,
8105                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8106                       ((u64) mapping >> 32));
8107         tg3_write_mem(tp,
8108                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8109                       ((u64) mapping & 0xffffffff));
8110         tg3_write_mem(tp,
8111                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8112                        maxlen_flags);
8113
8114         if (!tg3_flag(tp, 5705_PLUS))
8115                 tg3_write_mem(tp,
8116                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8117                               nic_addr);
8118 }
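
/* Editorial note: the shift/mask pair above is the open-coded equivalent
 * of the kernel's upper_32_bits()/lower_32_bits() helpers, splitting a
 * 64-bit DMA address across two 32-bit NIC registers; a sketch:
 *
 *	tg3_write_mem(tp, bdinfo_addr + TG3_BDINFO_HOST_ADDR +
 *		      TG3_64BIT_REG_HIGH, upper_32_bits(mapping));
 *	tg3_write_mem(tp, bdinfo_addr + TG3_BDINFO_HOST_ADDR +
 *		      TG3_64BIT_REG_LOW, lower_32_bits(mapping));
 */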
8119
8120 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8121 {
8122         int i;
8123
8124         if (!tg3_flag(tp, ENABLE_TSS)) {
8125                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8126                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8127                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8128         } else {
8129                 tw32(HOSTCC_TXCOL_TICKS, 0);
8130                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8131                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8132         }
8133
8134         if (!tg3_flag(tp, ENABLE_RSS)) {
8135                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8136                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8137                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8138         } else {
8139                 tw32(HOSTCC_RXCOL_TICKS, 0);
8140                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8141                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8142         }
8143
8144         if (!tg3_flag(tp, 5705_PLUS)) {
8145                 u32 val = ec->stats_block_coalesce_usecs;
8146
8147                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8148                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8149
8150                 if (!netif_carrier_ok(tp->dev))
8151                         val = 0;
8152
8153                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8154         }
8155
8156         for (i = 0; i < tp->irq_cnt - 1; i++) {
8157                 u32 reg;
8158
8159                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8160                 tw32(reg, ec->rx_coalesce_usecs);
8161                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8162                 tw32(reg, ec->rx_max_coalesced_frames);
8163                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8164                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8165
8166                 if (tg3_flag(tp, ENABLE_TSS)) {
8167                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8168                         tw32(reg, ec->tx_coalesce_usecs);
8169                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8170                         tw32(reg, ec->tx_max_coalesced_frames);
8171                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8172                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8173                 }
8174         }
8175
8176         for (; i < tp->irq_max - 1; i++) {
8177                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8178                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8179                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8180
8181                 if (tg3_flag(tp, ENABLE_TSS)) {
8182                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8183                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8184                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8185                 }
8186         }
8187 }
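
/* Editorial note: the per-vector host coalescing registers above are laid
 * out in 0x18-byte blocks starting at the *_VEC1 addresses, so vector n
 * (n >= 1) is programmed at:
 *
 *	reg = HOSTCC_RXCOL_TICKS_VEC1 + (n - 1) * 0x18;
 *
 * which is why both loops index i from 0 for vector 1 onward, and the
 * second loop zeroes the blocks of any vectors beyond irq_cnt.
 */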
8188
8189 /* tp->lock is held. */
8190 static void tg3_rings_reset(struct tg3 *tp)
8191 {
8192         int i;
8193         u32 stblk, txrcb, rxrcb, limit;
8194         struct tg3_napi *tnapi = &tp->napi[0];
8195
8196         /* Disable all transmit rings but the first. */
8197         if (!tg3_flag(tp, 5705_PLUS))
8198                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8199         else if (tg3_flag(tp, 5717_PLUS))
8200                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8201         else if (tg3_flag(tp, 57765_CLASS))
8202                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8203         else
8204                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8205
8206         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8207              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8208                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8209                               BDINFO_FLAGS_DISABLED);
8210
8211
8212         /* Disable all receive return rings but the first. */
8213         if (tg3_flag(tp, 5717_PLUS))
8214                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8215         else if (!tg3_flag(tp, 5705_PLUS))
8216                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8217         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8218                  tg3_flag(tp, 57765_CLASS))
8219                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8220         else
8221                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8222
8223         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8224              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8225                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8226                               BDINFO_FLAGS_DISABLED);
8227
8228         /* Disable interrupts */
8229         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8230         tp->napi[0].chk_msi_cnt = 0;
8231         tp->napi[0].last_rx_cons = 0;
8232         tp->napi[0].last_tx_cons = 0;
8233
8234         /* Zero mailbox registers. */
8235         if (tg3_flag(tp, SUPPORT_MSIX)) {
8236                 for (i = 1; i < tp->irq_max; i++) {
8237                         tp->napi[i].tx_prod = 0;
8238                         tp->napi[i].tx_cons = 0;
8239                         if (tg3_flag(tp, ENABLE_TSS))
8240                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8241                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8242                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8243                         tp->napi[i].chk_msi_cnt = 0;
8244                         tp->napi[i].last_rx_cons = 0;
8245                         tp->napi[i].last_tx_cons = 0;
8246                 }
8247                 if (!tg3_flag(tp, ENABLE_TSS))
8248                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8249         } else {
8250                 tp->napi[0].tx_prod = 0;
8251                 tp->napi[0].tx_cons = 0;
8252                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8253                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8254         }
8255
8256         /* Make sure the NIC-based send BD rings are disabled. */
8257         if (!tg3_flag(tp, 5705_PLUS)) {
8258                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8259                 for (i = 0; i < 16; i++)
8260                         tw32_tx_mbox(mbox + i * 8, 0);
8261         }
8262
8263         txrcb = NIC_SRAM_SEND_RCB;
8264         rxrcb = NIC_SRAM_RCV_RET_RCB;
8265
8266         /* Clear status block in ram. */
8267         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8268
8269         /* Set status block DMA address */
8270         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8271              ((u64) tnapi->status_mapping >> 32));
8272         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8273              ((u64) tnapi->status_mapping & 0xffffffff));
8274
8275         if (tnapi->tx_ring) {
8276                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8277                                (TG3_TX_RING_SIZE <<
8278                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8279                                NIC_SRAM_TX_BUFFER_DESC);
8280                 txrcb += TG3_BDINFO_SIZE;
8281         }
8282
8283         if (tnapi->rx_rcb) {
8284                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8285                                (tp->rx_ret_ring_mask + 1) <<
8286                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8287                 rxrcb += TG3_BDINFO_SIZE;
8288         }
8289
8290         stblk = HOSTCC_STATBLCK_RING1;
8291
8292         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8293                 u64 mapping = (u64)tnapi->status_mapping;
8294                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8295                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8296
8297                 /* Clear status block in ram. */
8298                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8299
8300                 if (tnapi->tx_ring) {
8301                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8302                                        (TG3_TX_RING_SIZE <<
8303                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8304                                        NIC_SRAM_TX_BUFFER_DESC);
8305                         txrcb += TG3_BDINFO_SIZE;
8306                 }
8307
8308                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8309                                ((tp->rx_ret_ring_mask + 1) <<
8310                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8311
8312                 stblk += 8;
8313                 rxrcb += TG3_BDINFO_SIZE;
8314         }
8315 }
8316
8317 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8318 {
8319         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8320
8321         if (!tg3_flag(tp, 5750_PLUS) ||
8322             tg3_flag(tp, 5780_CLASS) ||
8323             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8324             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8325             tg3_flag(tp, 57765_PLUS))
8326                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8327         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8328                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8329                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8330         else
8331                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8332
8333         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8334         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8335
8336         val = min(nic_rep_thresh, host_rep_thresh);
8337         tw32(RCVBDI_STD_THRESH, val);
8338
8339         if (tg3_flag(tp, 57765_PLUS))
8340                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8341
8342         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8343                 return;
8344
8345         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8346
8347         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8348
8349         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8350         tw32(RCVBDI_JUMBO_THRESH, val);
8351
8352         if (tg3_flag(tp, 57765_PLUS))
8353                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8354 }
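
/* Editorial note: a numeric illustration of the threshold math above,
 * with assumed (not hardware-derived) values:
 *
 *	bdcache_maxcnt = 8, rx_std_max_post = 8, rx_pending = 200
 *	nic_rep_thresh    = min(8 / 2, 8)   = 4
 *	host_rep_thresh   = max(200 / 8, 1) = 25
 *	RCVBDI_STD_THRESH = min(4, 25)      = 4
 *
 * i.e. the NIC asks for replenishment once its BD cache is half drained,
 * unless the host ring is so shallow that an eighth of it is smaller.
 */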
8355
8356 static inline u32 calc_crc(unsigned char *buf, int len)
8357 {
8358         u32 reg;
8359         u32 tmp;
8360         int j, k;
8361
8362         reg = 0xffffffff;
8363
8364         for (j = 0; j < len; j++) {
8365                 reg ^= buf[j];
8366
8367                 for (k = 0; k < 8; k++) {
8368                         tmp = reg & 0x01;
8369
8370                         reg >>= 1;
8371
8372                         if (tmp)
8373                                 reg ^= 0xedb88320;
8374                 }
8375         }
8376
8377         return ~reg;
8378 }
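
/* Editorial note: calc_crc() above is the classic bit-serial, reflected
 * CRC-32 used by Ethernet: 0xedb88320 is the bit-reversed 802.3
 * polynomial 0x04c11db7, the register seeds to all ones, and the result
 * is inverted before return.  Assuming the usual <linux/crc32.h>
 * semantics, a table-driven equivalent would be:
 *
 *	u32 crc = ~crc32_le(~0, buf, len);	// same value as calc_crc()
 */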
8379
8380 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8381 {
8382         /* accept or reject all multicast frames */
8383         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8384         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8385         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8386         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8387 }
8388
8389 static void __tg3_set_rx_mode(struct net_device *dev)
8390 {
8391         struct tg3 *tp = netdev_priv(dev);
8392         u32 rx_mode;
8393
8394         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8395                                   RX_MODE_KEEP_VLAN_TAG);
8396
8397 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8398         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8399          * flag clear.
8400          */
8401         if (!tg3_flag(tp, ENABLE_ASF))
8402                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8403 #endif
8404
8405         if (dev->flags & IFF_PROMISC) {
8406                 /* Promiscuous mode. */
8407                 rx_mode |= RX_MODE_PROMISC;
8408         } else if (dev->flags & IFF_ALLMULTI) {
8409                 /* Accept all multicast. */
8410                 tg3_set_multi(tp, 1);
8411         } else if (netdev_mc_empty(dev)) {
8412                 /* Reject all multicast. */
8413                 tg3_set_multi(tp, 0);
8414         } else {
8415                 /* Accept one or more multicast(s). */
8416                 struct netdev_hw_addr *ha;
8417                 u32 mc_filter[4] = { 0, };
8418                 u32 regidx;
8419                 u32 bit;
8420                 u32 crc;
8421
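                /* Hash each address to a 7-bit value: the top two bits
                 * select one of the four 32-bit MAC_HASH registers and
                 * the low five bits select a bit within that register.
                 */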
8422                 netdev_for_each_mc_addr(ha, dev) {
8423                         crc = calc_crc(ha->addr, ETH_ALEN);
8424                         bit = ~crc & 0x7f;
8425                         regidx = (bit & 0x60) >> 5;
8426                         bit &= 0x1f;
8427                         mc_filter[regidx] |= (1 << bit);
8428                 }
8429
8430                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8431                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8432                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8433                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8434         }
8435
8436         if (rx_mode != tp->rx_mode) {
8437                 tp->rx_mode = rx_mode;
8438                 tw32_f(MAC_RX_MODE, rx_mode);
8439                 udelay(10);
8440         }
8441 }
8442
8443 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8444 {
8445         int i;
8446
8447         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8448                 tp->rss_ind_tbl[i] =
8449                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8450 }
8451
8452 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8453 {
8454         int i;
8455
8456         if (!tg3_flag(tp, SUPPORT_MSIX))
8457                 return;
8458
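        /* tp->irq_cnt - 1 is the number of RX rings (one vector is the
         * default); with at most one ring there is nothing to spread,
         * so zero the table.
         */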
8459         if (tp->irq_cnt <= 2) {
8460                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8461                 return;
8462         }
8463
8464         /* Validate table against current IRQ count */
8465         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8466                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8467                         break;
8468         }
8469
8470         if (i != TG3_RSS_INDIR_TBL_SIZE)
8471                 tg3_rss_init_dflt_indir_tbl(tp);
8472 }
8473
8474 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8475 {
8476         int i = 0;
8477         u32 reg = MAC_RSS_INDIR_TBL_0;
8478
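        /* Each 32-bit register packs eight 4-bit indirection entries,
         * first entry in the most significant nibble.
         */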
8479         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8480                 u32 val = tp->rss_ind_tbl[i];
8481                 i++;
8482                 for (; i % 8; i++) {
8483                         val <<= 4;
8484                         val |= tp->rss_ind_tbl[i];
8485                 }
8486                 tw32(reg, val);
8487                 reg += 4;
8488         }
8489 }
8490
8491 /* tp->lock is held. */
8492 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8493 {
8494         u32 val, rdmac_mode;
8495         int i, err, limit;
8496         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8497
8498         tg3_disable_ints(tp);
8499
8500         tg3_stop_fw(tp);
8501
8502         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8503
8504         if (tg3_flag(tp, INIT_COMPLETE))
8505                 tg3_abort_hw(tp, 1);
8506
8507         /* Enable MAC control of LPI */
8508         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8509                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8510                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8511                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8512
8513                 tw32_f(TG3_CPMU_EEE_CTRL,
8514                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8515
8516                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8517                       TG3_CPMU_EEEMD_LPI_IN_TX |
8518                       TG3_CPMU_EEEMD_LPI_IN_RX |
8519                       TG3_CPMU_EEEMD_EEE_ENABLE;
8520
8521                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8522                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8523
8524                 if (tg3_flag(tp, ENABLE_APE))
8525                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8526
8527                 tw32_f(TG3_CPMU_EEE_MODE, val);
8528
8529                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8530                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8531                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8532
8533                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8534                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8535                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8536         }
8537
8538         if (reset_phy)
8539                 tg3_phy_reset(tp);
8540
8541         err = tg3_chip_reset(tp);
8542         if (err)
8543                 return err;
8544
8545         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8546
8547         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8548                 val = tr32(TG3_CPMU_CTRL);
8549                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8550                 tw32(TG3_CPMU_CTRL, val);
8551
8552                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8553                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8554                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8555                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8556
8557                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8558                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8559                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8560                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8561
8562                 val = tr32(TG3_CPMU_HST_ACC);
8563                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8564                 val |= CPMU_HST_ACC_MACCLK_6_25;
8565                 tw32(TG3_CPMU_HST_ACC, val);
8566         }
8567
8568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8569                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8570                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8571                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8572                 tw32(PCIE_PWR_MGMT_THRESH, val);
8573
8574                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8575                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8576
8577                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8578
8579                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8580                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8581         }
8582
8583         if (tg3_flag(tp, L1PLLPD_EN)) {
8584                 u32 grc_mode = tr32(GRC_MODE);
8585
8586                 /* Access the lower 1K of PL PCIE block registers. */
8587                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8588                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8589
8590                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8591                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8592                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8593
8594                 tw32(GRC_MODE, grc_mode);
8595         }
8596
8597         if (tg3_flag(tp, 57765_CLASS)) {
8598                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8599                         u32 grc_mode = tr32(GRC_MODE);
8600
8601                         /* Access the lower 1K of PL PCIE block registers. */
8602                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8603                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8604
8605                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8606                                    TG3_PCIE_PL_LO_PHYCTL5);
8607                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8608                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8609
8610                         tw32(GRC_MODE, grc_mode);
8611                 }
8612
8613                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8614                         u32 grc_mode = tr32(GRC_MODE);
8615
8616                         /* Access the lower 1K of DL PCIE block registers. */
8617                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8618                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8619
8620                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8621                                    TG3_PCIE_DL_LO_FTSMAX);
8622                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8623                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8624                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8625
8626                         tw32(GRC_MODE, grc_mode);
8627                 }
8628
8629                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8630                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8631                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8632                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8633         }
8634
8635         /* This works around an issue with Athlon chipsets on
8636          * B3 tigon3 silicon.  This bit has no effect on any
8637          * other revision.  But do not set this on PCI Express
8638          * chips and don't even touch the clocks if the CPMU is present.
8639          */
8640         if (!tg3_flag(tp, CPMU_PRESENT)) {
8641                 if (!tg3_flag(tp, PCI_EXPRESS))
8642                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8643                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8644         }
8645
8646         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8647             tg3_flag(tp, PCIX_MODE)) {
8648                 val = tr32(TG3PCI_PCISTATE);
8649                 val |= PCISTATE_RETRY_SAME_DMA;
8650                 tw32(TG3PCI_PCISTATE, val);
8651         }
8652
8653         if (tg3_flag(tp, ENABLE_APE)) {
8654                 /* Allow reads and writes to the
8655                  * APE register and memory space.
8656                  */
8657                 val = tr32(TG3PCI_PCISTATE);
8658                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8659                        PCISTATE_ALLOW_APE_SHMEM_WR |
8660                        PCISTATE_ALLOW_APE_PSPACE_WR;
8661                 tw32(TG3PCI_PCISTATE, val);
8662         }
8663
8664         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8665                 /* Enable some hw fixes.  */
8666                 val = tr32(TG3PCI_MSI_DATA);
8667                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8668                 tw32(TG3PCI_MSI_DATA, val);
8669         }
8670
8671         /* Descriptor ring init may access the NIC SRAM
8672          * area to set up the TX descriptors, so we can
8673          * only do this after the hardware has been
8674          * successfully reset.
8675          */
8676         err = tg3_init_rings(tp);
8677         if (err)
8678                 return err;
8679
8680         if (tg3_flag(tp, 57765_PLUS)) {
8681                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8682                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8683                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8684                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8685                 if (!tg3_flag(tp, 57765_CLASS) &&
8686                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8687                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8688                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8689         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8690                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8691                 /* This value is determined during the probe time DMA
8692                  * engine test, tg3_test_dma.
8693                  */
8694                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8695         }
8696
8697         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8698                           GRC_MODE_4X_NIC_SEND_RINGS |
8699                           GRC_MODE_NO_TX_PHDR_CSUM |
8700                           GRC_MODE_NO_RX_PHDR_CSUM);
8701         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8702
8703         /* Pseudo-header checksum is done by hardware logic and not
8704          * the offload processors, so make the chip do the pseudo-
8705          * header checksums on receive.  For transmit it is more
8706          * convenient to do the pseudo-header checksum in software
8707          * as Linux does that on transmit for us in all cases.
8708          */
8709         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8710
8711         tw32(GRC_MODE,
8712              tp->grc_mode |
8713              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8714
8715         /* Set up the timer prescaler register.  The clock is always 66 MHz; a value of 65 divides it down to a 1 MHz tick. */
8716         val = tr32(GRC_MISC_CFG);
8717         val &= ~0xff;
8718         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8719         tw32(GRC_MISC_CFG, val);
8720
8721         /* Initialize MBUF/DESC pool. */
8722         if (tg3_flag(tp, 5750_PLUS)) {
8723                 /* Do nothing.  */
8724         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8725                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8726                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8727                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8728                 else
8729                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8730                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8731                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8732         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8733                 int fw_len;
8734
8735                 fw_len = tp->fw_len;
8736                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8737                 tw32(BUFMGR_MB_POOL_ADDR,
8738                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8739                 tw32(BUFMGR_MB_POOL_SIZE,
8740                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8741         }
8742
8743         if (tp->dev->mtu <= ETH_DATA_LEN) {
8744                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8745                      tp->bufmgr_config.mbuf_read_dma_low_water);
8746                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8747                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8748                 tw32(BUFMGR_MB_HIGH_WATER,
8749                      tp->bufmgr_config.mbuf_high_water);
8750         } else {
8751                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8752                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8753                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8754                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8755                 tw32(BUFMGR_MB_HIGH_WATER,
8756                      tp->bufmgr_config.mbuf_high_water_jumbo);
8757         }
8758         tw32(BUFMGR_DMA_LOW_WATER,
8759              tp->bufmgr_config.dma_low_water);
8760         tw32(BUFMGR_DMA_HIGH_WATER,
8761              tp->bufmgr_config.dma_high_water);
8762
8763         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8764         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8765                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8766         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8767             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8768             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8769                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8770         tw32(BUFMGR_MODE, val);
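        /* Wait up to 20 ms (2000 x 10 us) for the buffer manager to
         * report itself enabled.
         */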
8771         for (i = 0; i < 2000; i++) {
8772                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8773                         break;
8774                 udelay(10);
8775         }
8776         if (i >= 2000) {
8777                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8778                 return -ENODEV;
8779         }
8780
8781         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8782                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8783
8784         tg3_setup_rxbd_thresholds(tp);
8785
8786         /* Initialize the TG3_BDINFOs at:
8787          *  RCVDBDI_STD_BD:     standard eth size rx ring
8788          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8789          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8790          *
8791          * like so:
8792          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8793          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8794          *                              ring attribute flags
8795          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8796          *
8797          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8798          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8799          *
8800          * The size of each ring is fixed in the firmware, but the location is
8801          * configurable.
8802          */
8803         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8804              ((u64) tpr->rx_std_mapping >> 32));
8805         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8806              ((u64) tpr->rx_std_mapping & 0xffffffff));
8807         if (!tg3_flag(tp, 5717_PLUS))
8808                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8809                      NIC_SRAM_RX_BUFFER_DESC);
8810
8811         /* Disable the mini ring */
8812         if (!tg3_flag(tp, 5705_PLUS))
8813                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8814                      BDINFO_FLAGS_DISABLED);
8815
8816         /* Program the jumbo buffer descriptor ring control
8817          * blocks on those devices that have them.
8818          */
8819         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8820             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8821
8822                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8823                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8824                              ((u64) tpr->rx_jmb_mapping >> 32));
8825                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8826                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8827                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8828                               BDINFO_FLAGS_MAXLEN_SHIFT;
8829                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8830                              val | BDINFO_FLAGS_USE_EXT_RECV);
8831                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8832                             tg3_flag(tp, 57765_CLASS))
8833                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8834                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8835                 } else {
8836                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8837                              BDINFO_FLAGS_DISABLED);
8838                 }
8839
8840                 if (tg3_flag(tp, 57765_PLUS)) {
8841                         val = TG3_RX_STD_RING_SIZE(tp);
8842                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8843                         val |= (TG3_RX_STD_DMA_SZ << 2);
8844                 } else
8845                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8846         } else
8847                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8848
8849         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8850
8851         tpr->rx_std_prod_idx = tp->rx_pending;
8852         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8853
8854         tpr->rx_jmb_prod_idx =
8855                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8856         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8857
8858         tg3_rings_reset(tp);
8859
8860         /* Initialize MAC address and backoff seed. */
8861         __tg3_set_mac_addr(tp, 0);
8862
8863         /* MTU + ethernet header + FCS + optional VLAN tag */
8864         tw32(MAC_RX_MTU_SIZE,
8865              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8866
8867         /* The slot time is changed by tg3_setup_phy if we
8868          * run at gigabit with half duplex.
8869          */
8870         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8871               (6 << TX_LENGTHS_IPG_SHIFT) |
8872               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8873
8874         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8875                 val |= tr32(MAC_TX_LENGTHS) &
8876                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8877                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8878
8879         tw32(MAC_TX_LENGTHS, val);
8880
8881         /* Receive rules. */
8882         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8883         tw32(RCVLPC_CONFIG, 0x0181);
8884
8885         /* Calculate the RDMAC_MODE setting early; we need it to determine
8886          * the RCVLPC_STATS_ENABLE mask.
8887          */
8888         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8889                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8890                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8891                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8892                       RDMAC_MODE_LNGREAD_ENAB);
8893
8894         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8895                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8896
8897         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8898             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8899             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8900                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8901                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8902                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8903
8904         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8905             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8906                 if (tg3_flag(tp, TSO_CAPABLE) &&
8907                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8908                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8909                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8910                            !tg3_flag(tp, IS_5788)) {
8911                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8912                 }
8913         }
8914
8915         if (tg3_flag(tp, PCI_EXPRESS))
8916                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8917
8918         if (tg3_flag(tp, HW_TSO_1) ||
8919             tg3_flag(tp, HW_TSO_2) ||
8920             tg3_flag(tp, HW_TSO_3))
8921                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8922
8923         if (tg3_flag(tp, 57765_PLUS) ||
8924             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8925             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8926                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8927
8928         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8929                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8930
8931         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8932             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8933             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8934             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8935             tg3_flag(tp, 57765_PLUS)) {
8936                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8937                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8938                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8939                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8940                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8941                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8942                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8943                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8944                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8945                 }
8946                 tw32(TG3_RDMA_RSRVCTRL_REG,
8947                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8948         }
8949
8950         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8951             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8952                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8953                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8954                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8955                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8956         }
8957
8958         /* Receive/send statistics. */
8959         if (tg3_flag(tp, 5750_PLUS)) {
8960                 val = tr32(RCVLPC_STATS_ENABLE);
8961                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8962                 tw32(RCVLPC_STATS_ENABLE, val);
8963         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8964                    tg3_flag(tp, TSO_CAPABLE)) {
8965                 val = tr32(RCVLPC_STATS_ENABLE);
8966                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8967                 tw32(RCVLPC_STATS_ENABLE, val);
8968         } else {
8969                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8970         }
8971         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8972         tw32(SNDDATAI_STATSENAB, 0xffffff);
8973         tw32(SNDDATAI_STATSCTRL,
8974              (SNDDATAI_SCTRL_ENABLE |
8975               SNDDATAI_SCTRL_FASTUPD));
8976
8977         /* Set up the host coalescing engine. */
8978         tw32(HOSTCC_MODE, 0);
8979         for (i = 0; i < 2000; i++) {
8980                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8981                         break;
8982                 udelay(10);
8983         }
8984
8985         __tg3_set_coalesce(tp, &tp->coal);
8986
8987         if (!tg3_flag(tp, 5705_PLUS)) {
8988                 /* Status/statistics block address.  See tg3_timer,
8989                  * the tg3_periodic_fetch_stats call there, and
8990                  * tg3_get_stats to see how this works for 5705/5750 chips.
8991                  */
8992                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8993                      ((u64) tp->stats_mapping >> 32));
8994                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8995                      ((u64) tp->stats_mapping & 0xffffffff));
8996                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8997
8998                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8999
9000                 /* Clear statistics and status block memory areas */
9001                 for (i = NIC_SRAM_STATS_BLK;
9002                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9003                      i += sizeof(u32)) {
9004                         tg3_write_mem(tp, i, 0);
9005                         udelay(40);
9006                 }
9007         }
9008
9009         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9010
9011         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9012         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9013         if (!tg3_flag(tp, 5705_PLUS))
9014                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9015
9016         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9017                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9018                 /* Reset to prevent intermittently losing the first rx packet */
9019                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9020                 udelay(10);
9021         }
9022
9023         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9024                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9025                         MAC_MODE_FHDE_ENABLE;
9026         if (tg3_flag(tp, ENABLE_APE))
9027                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9028         if (!tg3_flag(tp, 5705_PLUS) &&
9029             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9030             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9031                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9032         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9033         udelay(40);
9034
9035         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9036          * If TG3_FLAG_IS_NIC is zero, we should read the
9037          * register to preserve the GPIO settings for LOMs. The GPIOs,
9038          * whether used as inputs or outputs, are set by boot code after
9039          * reset.
9040          */
9041         if (!tg3_flag(tp, IS_NIC)) {
9042                 u32 gpio_mask;
9043
9044                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9045                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9046                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9047
9048                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9049                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9050                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9051
9052                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9053                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9054
9055                 tp->grc_local_ctrl &= ~gpio_mask;
9056                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9057
9058                 /* GPIO1 must be driven high for eeprom write protect */
9059                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9060                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9061                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9062         }
9063         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9064         udelay(100);
9065
9066         if (tg3_flag(tp, USING_MSIX)) {
9067                 val = tr32(MSGINT_MODE);
9068                 val |= MSGINT_MODE_ENABLE;
9069                 if (tp->irq_cnt > 1)
9070                         val |= MSGINT_MODE_MULTIVEC_EN;
9071                 if (!tg3_flag(tp, 1SHOT_MSI))
9072                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9073                 tw32(MSGINT_MODE, val);
9074         }
9075
9076         if (!tg3_flag(tp, 5705_PLUS)) {
9077                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9078                 udelay(40);
9079         }
9080
9081         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9082                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9083                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9084                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9085                WDMAC_MODE_LNGREAD_ENAB);
9086
9087         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9088             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9089                 if (tg3_flag(tp, TSO_CAPABLE) &&
9090                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9091                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9092                         /* nothing */
9093                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9094                            !tg3_flag(tp, IS_5788)) {
9095                         val |= WDMAC_MODE_RX_ACCEL;
9096                 }
9097         }
9098
9099         /* Enable host coalescing bug fix */
9100         if (tg3_flag(tp, 5755_PLUS))
9101                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9102
9103         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9104                 val |= WDMAC_MODE_BURST_ALL_DATA;
9105
9106         tw32_f(WDMAC_MODE, val);
9107         udelay(40);
9108
9109         if (tg3_flag(tp, PCIX_MODE)) {
9110                 u16 pcix_cmd;
9111
9112                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9113                                      &pcix_cmd);
9114                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9115                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9116                         pcix_cmd |= PCI_X_CMD_READ_2K;
9117                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9118                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9119                         pcix_cmd |= PCI_X_CMD_READ_2K;
9120                 }
9121                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9122                                       pcix_cmd);
9123         }
9124
9125         tw32_f(RDMAC_MODE, rdmac_mode);
9126         udelay(40);
9127
9128         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9129         if (!tg3_flag(tp, 5705_PLUS))
9130                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9131
9132         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9133                 tw32(SNDDATAC_MODE,
9134                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9135         else
9136                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9137
9138         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9139         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9140         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9141         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9142                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9143         tw32(RCVDBDI_MODE, val);
9144         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9145         if (tg3_flag(tp, HW_TSO_1) ||
9146             tg3_flag(tp, HW_TSO_2) ||
9147             tg3_flag(tp, HW_TSO_3))
9148                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9149         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9150         if (tg3_flag(tp, ENABLE_TSS))
9151                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9152         tw32(SNDBDI_MODE, val);
9153         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9154
9155         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9156                 err = tg3_load_5701_a0_firmware_fix(tp);
9157                 if (err)
9158                         return err;
9159         }
9160
9161         if (tg3_flag(tp, TSO_CAPABLE)) {
9162                 err = tg3_load_tso_firmware(tp);
9163                 if (err)
9164                         return err;
9165         }
9166
9167         tp->tx_mode = TX_MODE_ENABLE;
9168
9169         if (tg3_flag(tp, 5755_PLUS) ||
9170             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9171                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9172
9173         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9174                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9175                 tp->tx_mode &= ~val;
9176                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9177         }
9178
9179         tw32_f(MAC_TX_MODE, tp->tx_mode);
9180         udelay(100);
9181
9182         if (tg3_flag(tp, ENABLE_RSS)) {
9183                 tg3_rss_write_indir_tbl(tp);
9184
9185                 /* Set up the "secret" hash key. */
9186                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9187                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9188                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9189                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9190                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9191                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9192                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9193                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9194                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9195                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9196         }
9197
9198         tp->rx_mode = RX_MODE_ENABLE;
9199         if (tg3_flag(tp, 5755_PLUS))
9200                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9201
9202         if (tg3_flag(tp, ENABLE_RSS))
9203                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9204                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9205                                RX_MODE_RSS_IPV6_HASH_EN |
9206                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9207                                RX_MODE_RSS_IPV4_HASH_EN |
9208                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9209
9210         tw32_f(MAC_RX_MODE, tp->rx_mode);
9211         udelay(10);
9212
9213         tw32(MAC_LED_CTRL, tp->led_ctrl);
9214
9215         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9216         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9217                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9218                 udelay(10);
9219         }
9220         tw32_f(MAC_RX_MODE, tp->rx_mode);
9221         udelay(10);
9222
9223         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9224                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9225                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9226                         /* Set drive transmission level to 1.2V, but only
9227                          * if the signal pre-emphasis bit is not set. */
9228                         val = tr32(MAC_SERDES_CFG);
9229                         val &= 0xfffff000;
9230                         val |= 0x880;
9231                         tw32(MAC_SERDES_CFG, val);
9232                 }
9233                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9234                         tw32(MAC_SERDES_CFG, 0x616000);
9235         }
9236
9237         /* Prevent chip from dropping frames when flow control
9238          * is enabled.
9239          */
9240         if (tg3_flag(tp, 57765_CLASS))
9241                 val = 1;
9242         else
9243                 val = 2;
9244         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9245
9246         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9247             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9248                 /* Use hardware link auto-negotiation */
9249                 tg3_flag_set(tp, HW_AUTONEG);
9250         }
9251
9252         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9253             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9254                 u32 tmp;
9255
9256                 tmp = tr32(SERDES_RX_CTRL);
9257                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9258                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9259                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9260                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9261         }
9262
9263         if (!tg3_flag(tp, USE_PHYLIB)) {
9264                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9265                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9266
9267                 err = tg3_setup_phy(tp, 0);
9268                 if (err)
9269                         return err;
9270
9271                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9272                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9273                         u32 tmp;
9274
9275                         /* Clear CRC stats. */
9276                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9277                                 tg3_writephy(tp, MII_TG3_TEST1,
9278                                              tmp | MII_TG3_TEST1_CRC_EN);
9279                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9280                         }
9281                 }
9282         }
9283
9284         __tg3_set_rx_mode(tp->dev);
9285
9286         /* Initialize receive rules. */
9287         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9288         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9289         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9290         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9291
9292         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9293                 limit = 8;
9294         else
9295                 limit = 16;
9296         if (tg3_flag(tp, ENABLE_ASF))
9297                 limit -= 4;
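        /* The cases below fall through deliberately, zeroing every rule
         * from limit - 1 down to 4.  Rules 0 and 1 were programmed
         * above; rules 2 and 3 are left untouched.
         */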
9298         switch (limit) {
9299         case 16:
9300                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9301         case 15:
9302                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9303         case 14:
9304                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9305         case 13:
9306                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9307         case 12:
9308                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9309         case 11:
9310                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9311         case 10:
9312                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9313         case 9:
9314                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9315         case 8:
9316                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9317         case 7:
9318                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9319         case 6:
9320                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9321         case 5:
9322                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9323         case 4:
9324                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9325         case 3:
9326                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9327         case 2:
9328         case 1:
9329
9330         default:
9331                 break;
9332         }
9333
9334         if (tg3_flag(tp, ENABLE_APE))
9335                 /* Write our heartbeat update interval to APE. */
9336                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9337                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9338
9339         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9340
9341         return 0;
9342 }
9343
9344 /* Called at device open time to get the chip ready for
9345  * packet processing.  Invoked with tp->lock held.
9346  */
9347 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9348 {
9349         tg3_switch_clocks(tp);
9350
9351         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9352
9353         return tg3_reset_hw(tp, reset_phy);
9354 }
9355
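/* Accumulate a 32-bit hardware statistics register into a 64-bit
 * high/low software counter; the unsigned compare detects wraparound
 * of ->low and carries into ->high.
 */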
9356 #define TG3_STAT_ADD32(PSTAT, REG) \
9357 do {    u32 __val = tr32(REG); \
9358         (PSTAT)->low += __val; \
9359         if ((PSTAT)->low < __val) \
9360                 (PSTAT)->high += 1; \
9361 } while (0)
9362
9363 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9364 {
9365         struct tg3_hw_stats *sp = tp->hw_stats;
9366
9367         if (!netif_carrier_ok(tp->dev))
9368                 return;
9369
9370         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9371         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9372         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9373         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9374         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9375         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9376         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9377         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9378         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9379         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9380         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9381         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9382         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9383
9384         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9385         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9386         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9387         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9388         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9389         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9390         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9391         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9392         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9393         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9394         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9395         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9396         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9397         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9398
9399         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9400         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9401             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9402             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9403                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9404         } else {
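                /* These chips can't use RCVLPC_IN_DISCARDS_CNT, so
                 * approximate rx_discards by counting mbuf low-watermark
                 * attention events, writing the bit back to clear it.
                 */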
9405                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9406                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9407                 if (val) {
9408                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9409                         sp->rx_discards.low += val;
9410                         if (sp->rx_discards.low < val)
9411                                 sp->rx_discards.high += 1;
9412                 }
9413                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9414         }
9415         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9416 }
9417
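/* Workaround for lost MSIs: if a vector still has work pending but its
 * consumer indices haven't moved since the last timer tick, allow one
 * tick of grace and then invoke the MSI handler by hand.
 */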
9418 static void tg3_chk_missed_msi(struct tg3 *tp)
9419 {
9420         u32 i;
9421
9422         for (i = 0; i < tp->irq_cnt; i++) {
9423                 struct tg3_napi *tnapi = &tp->napi[i];
9424
9425                 if (tg3_has_work(tnapi)) {
9426                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9427                             tnapi->last_tx_cons == tnapi->tx_cons) {
9428                                 if (tnapi->chk_msi_cnt < 1) {
9429                                         tnapi->chk_msi_cnt++;
9430                                         return;
9431                                 }
9432                                 tg3_msi(0, tnapi);
9433                         }
9434                 }
9435                 tnapi->chk_msi_cnt = 0;
9436                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9437                 tnapi->last_tx_cons = tnapi->tx_cons;
9438         }
9439 }
9440
9441 static void tg3_timer(unsigned long __opaque)
9442 {
9443         struct tg3 *tp = (struct tg3 *) __opaque;
9444
9445         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9446                 goto restart_timer;
9447
9448         spin_lock(&tp->lock);
9449
9450         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9451             tg3_flag(tp, 57765_CLASS))
9452                 tg3_chk_missed_msi(tp);
9453
9454         if (!tg3_flag(tp, TAGGED_STATUS)) {
9455                 /* All of this garbage is because, when using non-tagged
9456                  * IRQ status, the mailbox/status_block protocol the chip
9457                  * uses with the CPU is race prone.
9458                  */
9459                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9460                         tw32(GRC_LOCAL_CTRL,
9461                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9462                 } else {
9463                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9464                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9465                 }
9466
9467                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9468                         spin_unlock(&tp->lock);
9469                         tg3_reset_task_schedule(tp);
9470                         goto restart_timer;
9471                 }
9472         }
9473
9474         /* This part only runs once per second. */
9475         if (!--tp->timer_counter) {
9476                 if (tg3_flag(tp, 5705_PLUS))
9477                         tg3_periodic_fetch_stats(tp);
9478
9479                 if (tp->setlpicnt && !--tp->setlpicnt)
9480                         tg3_phy_eee_enable(tp);
9481
9482                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9483                         u32 mac_stat;
9484                         int phy_event;
9485
9486                         mac_stat = tr32(MAC_STATUS);
9487
9488                         phy_event = 0;
9489                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9490                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9491                                         phy_event = 1;
9492                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9493                                 phy_event = 1;
9494
9495                         if (phy_event)
9496                                 tg3_setup_phy(tp, 0);
9497                 } else if (tg3_flag(tp, POLL_SERDES)) {
9498                         u32 mac_stat = tr32(MAC_STATUS);
9499                         int need_setup = 0;
9500
9501                         if (netif_carrier_ok(tp->dev) &&
9502                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9503                                 need_setup = 1;
9504                         }
9505                         if (!netif_carrier_ok(tp->dev) &&
9506                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9507                                          MAC_STATUS_SIGNAL_DET))) {
9508                                 need_setup = 1;
9509                         }
9510                         if (need_setup) {
9511                                 if (!tp->serdes_counter) {
9512                                         tw32_f(MAC_MODE,
9513                                              (tp->mac_mode &
9514                                               ~MAC_MODE_PORT_MODE_MASK));
9515                                         udelay(40);
9516                                         tw32_f(MAC_MODE, tp->mac_mode);
9517                                         udelay(40);
9518                                 }
9519                                 tg3_setup_phy(tp, 0);
9520                         }
9521                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9522                            tg3_flag(tp, 5780_CLASS)) {
9523                         tg3_serdes_parallel_detect(tp);
9524                 }
9525
9526                 tp->timer_counter = tp->timer_multiplier;
9527         }
9528
9529         /* Heartbeat is only sent once every 2 seconds.
9530          *
9531          * The heartbeat is to tell the ASF firmware that the host
9532          * driver is still alive.  In the event that the OS crashes,
9533          * ASF needs to reset the hardware to free up the FIFO space
9534          * that may be filled with rx packets destined for the host.
9535          * If the FIFO is full, ASF will no longer function properly.
9536          *
9537          * Unintended resets have been reported on real time kernels
9538          * where the timer doesn't run on time.  Netpoll will also have
9539          * the same problem.
9540          *
9541          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9542          * to check the ring condition when the heartbeat is expiring
9543          * before doing the reset.  This will prevent most unintended
9544          * resets.
9545          */
9546         if (!--tp->asf_counter) {
9547                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9548                         tg3_wait_for_event_ack(tp);
9549
9550                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9551                                       FWCMD_NICDRV_ALIVE3);
9552                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9553                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9554                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9555
9556                         tg3_generate_fw_event(tp);
9557                 }
9558                 tp->asf_counter = tp->asf_multiplier;
9559         }
9560
9561         spin_unlock(&tp->lock);
9562
9563 restart_timer:
9564         tp->timer.expires = jiffies + tp->timer_offset;
9565         add_timer(&tp->timer);
9566 }
9567
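/* Chips with tagged status run the timer at 1 Hz; everything else,
 * including the 5717 and 57765 parts that need the missed-MSI check,
 * runs at 10 Hz.  timer_multiplier scales the fast tick back to the
 * once-per-second work in tg3_timer.
 */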
9568 static void __devinit tg3_timer_init(struct tg3 *tp)
9569 {
9570         if (tg3_flag(tp, TAGGED_STATUS) &&
9571             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9572             !tg3_flag(tp, 57765_CLASS))
9573                 tp->timer_offset = HZ;
9574         else
9575                 tp->timer_offset = HZ / 10;
9576
9577         BUG_ON(tp->timer_offset > HZ);
9578
9579         tp->timer_multiplier = (HZ / tp->timer_offset);
9580         tp->asf_multiplier = (HZ / tp->timer_offset) *
9581                              TG3_FW_UPDATE_FREQ_SEC;
9582
9583         init_timer(&tp->timer);
9584         tp->timer.data = (unsigned long) tp;
9585         tp->timer.function = tg3_timer;
9586 }
9587
9588 static void tg3_timer_start(struct tg3 *tp)
9589 {
9590         tp->asf_counter   = tp->asf_multiplier;
9591         tp->timer_counter = tp->timer_multiplier;
9592
9593         tp->timer.expires = jiffies + tp->timer_offset;
9594         add_timer(&tp->timer);
9595 }
9596
9597 static void tg3_timer_stop(struct tg3 *tp)
9598 {
9599         del_timer_sync(&tp->timer);
9600 }
9601
9602 /* Restart hardware after configuration changes, self-test, etc.
9603  * Invoked with tp->lock held.
9604  */
9605 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9606         __releases(tp->lock)
9607         __acquires(tp->lock)
9608 {
9609         int err;
9610
9611         err = tg3_init_hw(tp, reset_phy);
9612         if (err) {
9613                 netdev_err(tp->dev,
9614                            "Failed to re-initialize device, aborting\n");
9615                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9616                 tg3_full_unlock(tp);
9617                 tg3_timer_stop(tp);
9618                 tp->irq_sync = 0;
9619                 tg3_napi_enable(tp);
9620                 dev_close(tp->dev);
9621                 tg3_full_lock(tp, 0);
9622         }
9623         return err;
9624 }
9625
9626 static void tg3_reset_task(struct work_struct *work)
9627 {
9628         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9629         int err;
9630
9631         tg3_full_lock(tp, 0);
9632
9633         if (!netif_running(tp->dev)) {
9634                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9635                 tg3_full_unlock(tp);
9636                 return;
9637         }
9638
9639         tg3_full_unlock(tp);
9640
9641         tg3_phy_stop(tp);
9642
9643         tg3_netif_stop(tp);
9644
9645         tg3_full_lock(tp, 1);
9646
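        /* A TX timeout may mean posted mailbox writes were reordered on
         * their way to the chip; switch the mailbox write routines to
         * variants flushed with a read-back before reinitializing.
         */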
9647         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9648                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9649                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9650                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9651                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9652         }
9653
9654         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9655         err = tg3_init_hw(tp, 1);
9656         if (err)
9657                 goto out;
9658
9659         tg3_netif_start(tp);
9660
9661 out:
9662         tg3_full_unlock(tp);
9663
9664         if (!err)
9665                 tg3_phy_start(tp);
9666
9667         tg3_flag_clear(tp, RESET_TASK_PENDING);
9668 }
9669
9670 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9671 {
9672         irq_handler_t fn;
9673         unsigned long flags;
9674         char *name;
9675         struct tg3_napi *tnapi = &tp->napi[irq_num];
9676
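        /* With multiple vectors, label each IRQ "<ifname>-<n>" so the
         * entries in /proc/interrupts can be told apart.
         */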
9677         if (tp->irq_cnt == 1)
9678                 name = tp->dev->name;
9679         else {
9680                 name = &tnapi->irq_lbl[0];
9681                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9682                 name[IFNAMSIZ-1] = 0;
9683         }
9684
9685         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9686                 fn = tg3_msi;
9687                 if (tg3_flag(tp, 1SHOT_MSI))
9688                         fn = tg3_msi_1shot;
9689                 flags = 0;
9690         } else {
9691                 fn = tg3_interrupt;
9692                 if (tg3_flag(tp, TAGGED_STATUS))
9693                         fn = tg3_interrupt_tagged;
9694                 flags = IRQF_SHARED;
9695         }
9696
9697         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9698 }
9699
9700 static int tg3_test_interrupt(struct tg3 *tp)
9701 {
9702         struct tg3_napi *tnapi = &tp->napi[0];
9703         struct net_device *dev = tp->dev;
9704         int err, i, intr_ok = 0;
9705         u32 val;
9706
9707         if (!netif_running(dev))
9708                 return -ENODEV;
9709
9710         tg3_disable_ints(tp);
9711
9712         free_irq(tnapi->irq_vec, tnapi);
9713
9714         /*
9715          * Turn off MSI one shot mode.  Otherwise this test has no
9716          * observable way to know whether the interrupt was delivered.
9717          */
9718         if (tg3_flag(tp, 57765_PLUS)) {
9719                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9720                 tw32(MSGINT_MODE, val);
9721         }
9722
9723         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9724                           IRQF_SHARED, dev->name, tnapi);
9725         if (err)
9726                 return err;
9727
9728         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9729         tg3_enable_ints(tp);
9730
9731         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9732                tnapi->coal_now);
9733
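        /* Poll for up to ~50 ms (5 x 10 ms) for evidence of delivery:
         * either the interrupt mailbox has gone non-zero, or PCI
         * interrupts were masked in MISC_HOST_CTRL, presumably by the
         * test handler.
         */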
9734         for (i = 0; i < 5; i++) {
9735                 u32 int_mbox, misc_host_ctrl;
9736
9737                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9738                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9739
9740                 if ((int_mbox != 0) ||
9741                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9742                         intr_ok = 1;
9743                         break;
9744                 }
9745
9746                 if (tg3_flag(tp, 57765_PLUS) &&
9747                     tnapi->hw_status->status_tag != tnapi->last_tag)
9748                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9749
9750                 msleep(10);
9751         }
9752
9753         tg3_disable_ints(tp);
9754
9755         free_irq(tnapi->irq_vec, tnapi);
9756
9757         err = tg3_request_irq(tp, 0);
9758
9759         if (err)
9760                 return err;
9761
9762         if (intr_ok) {
9763                 /* Reenable MSI one shot mode. */
9764                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9765                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9766                         tw32(MSGINT_MODE, val);
9767                 }
9768                 return 0;
9769         }
9770
9771         return -EIO;
9772 }
9773
9774 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
9775  * but INTx mode is successfully restored.
9776  */
9777 static int tg3_test_msi(struct tg3 *tp)
9778 {
9779         int err;
9780         u16 pci_cmd;
9781
9782         if (!tg3_flag(tp, USING_MSI))
9783                 return 0;
9784
9785         /* Turn off SERR reporting in case MSI terminates with Master
9786          * Abort.
9787          */
9788         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9789         pci_write_config_word(tp->pdev, PCI_COMMAND,
9790                               pci_cmd & ~PCI_COMMAND_SERR);
9791
9792         err = tg3_test_interrupt(tp);
9793
9794         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9795
9796         if (!err)
9797                 return 0;
9798
9799         /* other failures */
9800         if (err != -EIO)
9801                 return err;
9802
9803         /* MSI test failed, go back to INTx mode */
9804         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9805                     "to INTx mode. Please report this failure to the PCI "
9806                     "maintainer and include system chipset information\n");
9807
9808         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9809
9810         pci_disable_msi(tp->pdev);
9811
9812         tg3_flag_clear(tp, USING_MSI);
9813         tp->napi[0].irq_vec = tp->pdev->irq;
9814
9815         err = tg3_request_irq(tp, 0);
9816         if (err)
9817                 return err;
9818
9819         /* Need to reset the chip because the MSI cycle may have terminated
9820          * with Master Abort.
9821          */
9822         tg3_full_lock(tp, 1);
9823
9824         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9825         err = tg3_init_hw(tp, 1);
9826
9827         tg3_full_unlock(tp);
9828
9829         if (err)
9830                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9831
9832         return err;
9833 }
9834
9835 static int tg3_request_firmware(struct tg3 *tp)
9836 {
9837         const __be32 *fw_data;
9838
9839         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9840                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9841                            tp->fw_needed);
9842                 return -ENOENT;
9843         }
9844
9845         fw_data = (void *)tp->fw->data;
9846
9847         /* Firmware blob starts with version numbers, followed by
9848          * the start address and the _full_ length including BSS sections
9849          * (which must be longer than the actual data, of course).
9850          */
9851
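        /* Assumed word layout, inferred from the comment above and the
         * length check below (all values big-endian):
         *   fw_data[0]   version
         *   fw_data[1]   start (load) address
         *   fw_data[2]   full length including BSS
         *   fw_data[3]+  payload, tp->fw->size - 12 bytes on disk
         */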
9852         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9853         if (tp->fw_len < (tp->fw->size - 12)) {
9854                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9855                            tp->fw_len, tp->fw_needed);
9856                 release_firmware(tp->fw);
9857                 tp->fw = NULL;
9858                 return -EINVAL;
9859         }
9860
9861         /* We no longer need to request the firmware; we have it. */
9862         tp->fw_needed = NULL;
9863         return 0;
9864 }
9865
9866 static bool tg3_enable_msix(struct tg3 *tp)
9867 {
9868         int i, rc;
9869         struct msix_entry msix_ent[tp->irq_max];
9870
9871         tp->irq_cnt = num_online_cpus();
9872         if (tp->irq_cnt > 1) {
9873                 /* We want as many rx rings enabled as there are cpus.
9874                  * In multiqueue MSI-X mode, the first MSI-X vector
9875                  * only deals with link interrupts, etc, so we add
9876                  * one to the number of vectors we are requesting.
9877                  */
9878                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9879         }
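        /* Example: a 4-CPU system with irq_max >= 5 requests 5 vectors,
         * i.e. one link/misc vector plus one rx vector per CPU.
         */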
9880
9881         for (i = 0; i < tp->irq_max; i++) {
9882                 msix_ent[i].entry  = i;
9883                 msix_ent[i].vector = 0;
9884         }
9885
9886         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9887         if (rc < 0) {
9888                 return false;
9889         } else if (rc != 0) {
9890                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9891                         return false;
9892                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9893                               tp->irq_cnt, rc);
9894                 tp->irq_cnt = rc;
9895         }
9896
9897         for (i = 0; i < tp->irq_max; i++)
9898                 tp->napi[i].irq_vec = msix_ent[i].vector;
9899
9900         netif_set_real_num_tx_queues(tp->dev, 1);
9901         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9902         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9903                 pci_disable_msix(tp->pdev);
9904                 return false;
9905         }
9906
9907         if (tp->irq_cnt > 1) {
9908                 tg3_flag_set(tp, ENABLE_RSS);
9909
9910                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9911                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9912                         tg3_flag_set(tp, ENABLE_TSS);
9913                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9914                 }
9915         }
9916
9917         return true;
9918 }
9919
9920 static void tg3_ints_init(struct tg3 *tp)
9921 {
9922         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9923             !tg3_flag(tp, TAGGED_STATUS)) {
9924                 /* All MSI supporting chips should support tagged
9925                  * status.  Assert that this is the case.
9926                  */
9927                 netdev_warn(tp->dev,
9928                             "MSI without TAGGED_STATUS? Not using MSI\n");
9929                 goto defcfg;
9930         }
9931
9932         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9933                 tg3_flag_set(tp, USING_MSIX);
9934         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9935                 tg3_flag_set(tp, USING_MSI);
9936
9937         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9938                 u32 msi_mode = tr32(MSGINT_MODE);
9939                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9940                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9941                 if (!tg3_flag(tp, 1SHOT_MSI))
9942                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9943                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9944         }
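        /* Anything that did not end up on MSI-X (plain MSI or legacy
         * INTx) falls through to defcfg and runs single-queue with the
         * one vector taken from pdev->irq.
         */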
9945 defcfg:
9946         if (!tg3_flag(tp, USING_MSIX)) {
9947                 tp->irq_cnt = 1;
9948                 tp->napi[0].irq_vec = tp->pdev->irq;
9949                 netif_set_real_num_tx_queues(tp->dev, 1);
9950                 netif_set_real_num_rx_queues(tp->dev, 1);
9951         }
9952 }
9953
9954 static void tg3_ints_fini(struct tg3 *tp)
9955 {
9956         if (tg3_flag(tp, USING_MSIX))
9957                 pci_disable_msix(tp->pdev);
9958         else if (tg3_flag(tp, USING_MSI))
9959                 pci_disable_msi(tp->pdev);
9960         tg3_flag_clear(tp, USING_MSI);
9961         tg3_flag_clear(tp, USING_MSIX);
9962         tg3_flag_clear(tp, ENABLE_RSS);
9963         tg3_flag_clear(tp, ENABLE_TSS);
9964 }
9965
9966 static int tg3_open(struct net_device *dev)
9967 {
9968         struct tg3 *tp = netdev_priv(dev);
9969         int i, err;
9970
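        /* On 5701 A0 a failed firmware load aborts the open; on every
         * other chip the blob only provides TSO, so a failure merely
         * disables that feature (and a later success restores it).
         */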
9971         if (tp->fw_needed) {
9972                 err = tg3_request_firmware(tp);
9973                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9974                         if (err)
9975                                 return err;
9976                 } else if (err) {
9977                         netdev_warn(tp->dev, "TSO capability disabled\n");
9978                         tg3_flag_clear(tp, TSO_CAPABLE);
9979                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9980                         netdev_notice(tp->dev, "TSO capability restored\n");
9981                         tg3_flag_set(tp, TSO_CAPABLE);
9982                 }
9983         }
9984
9985         netif_carrier_off(tp->dev);
9986
9987         err = tg3_power_up(tp);
9988         if (err)
9989                 return err;
9990
9991         tg3_full_lock(tp, 0);
9992
9993         tg3_disable_ints(tp);
9994         tg3_flag_clear(tp, INIT_COMPLETE);
9995
9996         tg3_full_unlock(tp);
9997
9998         /*
9999          * Setup interrupts first so we know how
10000          * many NAPI resources to allocate
10001          */
10002         tg3_ints_init(tp);
10003
10004         tg3_rss_check_indir_tbl(tp);
10005
10006         /* The placement of this call is tied
10007          * to the setup and use of Host TX descriptors.
10008          */
10009         err = tg3_alloc_consistent(tp);
10010         if (err)
10011                 goto err_out1;
10012
10013         tg3_napi_init(tp);
10014
10015         tg3_napi_enable(tp);
10016
10017         for (i = 0; i < tp->irq_cnt; i++) {
10018                 struct tg3_napi *tnapi = &tp->napi[i];
10019                 err = tg3_request_irq(tp, i);
10020                 if (err) {
10021                         for (i--; i >= 0; i--) {
10022                                 tnapi = &tp->napi[i];
10023                                 free_irq(tnapi->irq_vec, tnapi);
10024                         }
10025                         goto err_out2;
10026                 }
10027         }
10028
10029         tg3_full_lock(tp, 0);
10030
10031         err = tg3_init_hw(tp, 1);
10032         if (err) {
10033                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10034                 tg3_free_rings(tp);
10035         }
10036
10037         tg3_full_unlock(tp);
10038
10039         if (err)
10040                 goto err_out3;
10041
10042         if (tg3_flag(tp, USING_MSI)) {
10043                 err = tg3_test_msi(tp);
10044
10045                 if (err) {
10046                         tg3_full_lock(tp, 0);
10047                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10048                         tg3_free_rings(tp);
10049                         tg3_full_unlock(tp);
10050
10051                         goto err_out2;
10052                 }
10053
10054                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10055                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10056
10057                         tw32(PCIE_TRANSACTION_CFG,
10058                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10059                 }
10060         }
10061
10062         tg3_phy_start(tp);
10063
10064         tg3_full_lock(tp, 0);
10065
10066         tg3_timer_start(tp);
10067         tg3_flag_set(tp, INIT_COMPLETE);
10068         tg3_enable_ints(tp);
10069
10070         tg3_full_unlock(tp);
10071
10072         netif_tx_start_all_queues(dev);
10073
10074         /*
10075          * Reset the loopback feature if it was turned on while the device
10076          * was down; make sure that it is reinstalled properly now.
10077          */
10078         if (dev->features & NETIF_F_LOOPBACK)
10079                 tg3_set_loopback(dev, dev->features);
10080
10081         return 0;
10082
10083 err_out3:
10084         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10085                 struct tg3_napi *tnapi = &tp->napi[i];
10086                 free_irq(tnapi->irq_vec, tnapi);
10087         }
10088
10089 err_out2:
10090         tg3_napi_disable(tp);
10091         tg3_napi_fini(tp);
10092         tg3_free_consistent(tp);
10093
10094 err_out1:
10095         tg3_ints_fini(tp);
10096         tg3_frob_aux_power(tp, false);
10097         pci_set_power_state(tp->pdev, PCI_D3hot);
10098         return err;
10099 }
10100
10101 static int tg3_close(struct net_device *dev)
10102 {
10103         int i;
10104         struct tg3 *tp = netdev_priv(dev);
10105
10106         tg3_napi_disable(tp);
10107         tg3_reset_task_cancel(tp);
10108
10109         netif_tx_stop_all_queues(dev);
10110
10111         tg3_timer_stop(tp);
10112
10113         tg3_phy_stop(tp);
10114
10115         tg3_full_lock(tp, 1);
10116
10117         tg3_disable_ints(tp);
10118
10119         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10120         tg3_free_rings(tp);
10121         tg3_flag_clear(tp, INIT_COMPLETE);
10122
10123         tg3_full_unlock(tp);
10124
10125         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10126                 struct tg3_napi *tnapi = &tp->napi[i];
10127                 free_irq(tnapi->irq_vec, tnapi);
10128         }
10129
10130         tg3_ints_fini(tp);
10131
10132         /* Clear stats across close / open calls */
10133         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10134         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10135
10136         tg3_napi_fini(tp);
10137
10138         tg3_free_consistent(tp);
10139
10140         tg3_power_down(tp);
10141
10142         netif_carrier_off(tp->dev);
10143
10144         return 0;
10145 }
10146
10147 static inline u64 get_stat64(tg3_stat64_t *val)
10148 {
10149         return ((u64)val->high << 32) | ((u64)val->low);
10150 }
10151
10152 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10153 {
10154         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10155
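        /* 5700/5701 copper devices count CRC errors in a PHY register
         * rather than in the MAC statistics block, so the running total
         * is accumulated in software in tp->phy_crc_errors (the hardware
         * counter presumably clears on read).
         */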
10156         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10157             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10158              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10159                 u32 val;
10160
10161                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10162                         tg3_writephy(tp, MII_TG3_TEST1,
10163                                      val | MII_TG3_TEST1_CRC_EN);
10164                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10165                 } else
10166                         val = 0;
10167
10168                 tp->phy_crc_errors += val;
10169
10170                 return tp->phy_crc_errors;
10171         }
10172
10173         return get_stat64(&hw_stats->rx_fcs_errors);
10174 }
10175
10176 #define ESTAT_ADD(member) \
10177         estats->member =        old_estats->member + \
10178                                 get_stat64(&hw_stats->member)
10179
10180 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10181 {
10182         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10183         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10184
10185         ESTAT_ADD(rx_octets);
10186         ESTAT_ADD(rx_fragments);
10187         ESTAT_ADD(rx_ucast_packets);
10188         ESTAT_ADD(rx_mcast_packets);
10189         ESTAT_ADD(rx_bcast_packets);
10190         ESTAT_ADD(rx_fcs_errors);
10191         ESTAT_ADD(rx_align_errors);
10192         ESTAT_ADD(rx_xon_pause_rcvd);
10193         ESTAT_ADD(rx_xoff_pause_rcvd);
10194         ESTAT_ADD(rx_mac_ctrl_rcvd);
10195         ESTAT_ADD(rx_xoff_entered);
10196         ESTAT_ADD(rx_frame_too_long_errors);
10197         ESTAT_ADD(rx_jabbers);
10198         ESTAT_ADD(rx_undersize_packets);
10199         ESTAT_ADD(rx_in_length_errors);
10200         ESTAT_ADD(rx_out_length_errors);
10201         ESTAT_ADD(rx_64_or_less_octet_packets);
10202         ESTAT_ADD(rx_65_to_127_octet_packets);
10203         ESTAT_ADD(rx_128_to_255_octet_packets);
10204         ESTAT_ADD(rx_256_to_511_octet_packets);
10205         ESTAT_ADD(rx_512_to_1023_octet_packets);
10206         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10207         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10208         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10209         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10210         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10211
10212         ESTAT_ADD(tx_octets);
10213         ESTAT_ADD(tx_collisions);
10214         ESTAT_ADD(tx_xon_sent);
10215         ESTAT_ADD(tx_xoff_sent);
10216         ESTAT_ADD(tx_flow_control);
10217         ESTAT_ADD(tx_mac_errors);
10218         ESTAT_ADD(tx_single_collisions);
10219         ESTAT_ADD(tx_mult_collisions);
10220         ESTAT_ADD(tx_deferred);
10221         ESTAT_ADD(tx_excessive_collisions);
10222         ESTAT_ADD(tx_late_collisions);
10223         ESTAT_ADD(tx_collide_2times);
10224         ESTAT_ADD(tx_collide_3times);
10225         ESTAT_ADD(tx_collide_4times);
10226         ESTAT_ADD(tx_collide_5times);
10227         ESTAT_ADD(tx_collide_6times);
10228         ESTAT_ADD(tx_collide_7times);
10229         ESTAT_ADD(tx_collide_8times);
10230         ESTAT_ADD(tx_collide_9times);
10231         ESTAT_ADD(tx_collide_10times);
10232         ESTAT_ADD(tx_collide_11times);
10233         ESTAT_ADD(tx_collide_12times);
10234         ESTAT_ADD(tx_collide_13times);
10235         ESTAT_ADD(tx_collide_14times);
10236         ESTAT_ADD(tx_collide_15times);
10237         ESTAT_ADD(tx_ucast_packets);
10238         ESTAT_ADD(tx_mcast_packets);
10239         ESTAT_ADD(tx_bcast_packets);
10240         ESTAT_ADD(tx_carrier_sense_errors);
10241         ESTAT_ADD(tx_discards);
10242         ESTAT_ADD(tx_errors);
10243
10244         ESTAT_ADD(dma_writeq_full);
10245         ESTAT_ADD(dma_write_prioq_full);
10246         ESTAT_ADD(rxbds_empty);
10247         ESTAT_ADD(rx_discards);
10248         ESTAT_ADD(rx_errors);
10249         ESTAT_ADD(rx_threshold_hit);
10250
10251         ESTAT_ADD(dma_readq_full);
10252         ESTAT_ADD(dma_read_prioq_full);
10253         ESTAT_ADD(tx_comp_queue_full);
10254
10255         ESTAT_ADD(ring_set_send_prod_index);
10256         ESTAT_ADD(ring_status_update);
10257         ESTAT_ADD(nic_irqs);
10258         ESTAT_ADD(nic_avoided_irqs);
10259         ESTAT_ADD(nic_tx_threshold_hit);
10260
10261         ESTAT_ADD(mbuf_lwm_thresh_hit);
10262 }
10263
10264 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10265 {
10266         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10267         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10268
10269         stats->rx_packets = old_stats->rx_packets +
10270                 get_stat64(&hw_stats->rx_ucast_packets) +
10271                 get_stat64(&hw_stats->rx_mcast_packets) +
10272                 get_stat64(&hw_stats->rx_bcast_packets);
10273
10274         stats->tx_packets = old_stats->tx_packets +
10275                 get_stat64(&hw_stats->tx_ucast_packets) +
10276                 get_stat64(&hw_stats->tx_mcast_packets) +
10277                 get_stat64(&hw_stats->tx_bcast_packets);
10278
10279         stats->rx_bytes = old_stats->rx_bytes +
10280                 get_stat64(&hw_stats->rx_octets);
10281         stats->tx_bytes = old_stats->tx_bytes +
10282                 get_stat64(&hw_stats->tx_octets);
10283
10284         stats->rx_errors = old_stats->rx_errors +
10285                 get_stat64(&hw_stats->rx_errors);
10286         stats->tx_errors = old_stats->tx_errors +
10287                 get_stat64(&hw_stats->tx_errors) +
10288                 get_stat64(&hw_stats->tx_mac_errors) +
10289                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10290                 get_stat64(&hw_stats->tx_discards);
10291
10292         stats->multicast = old_stats->multicast +
10293                 get_stat64(&hw_stats->rx_mcast_packets);
10294         stats->collisions = old_stats->collisions +
10295                 get_stat64(&hw_stats->tx_collisions);
10296
10297         stats->rx_length_errors = old_stats->rx_length_errors +
10298                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10299                 get_stat64(&hw_stats->rx_undersize_packets);
10300
10301         stats->rx_over_errors = old_stats->rx_over_errors +
10302                 get_stat64(&hw_stats->rxbds_empty);
10303         stats->rx_frame_errors = old_stats->rx_frame_errors +
10304                 get_stat64(&hw_stats->rx_align_errors);
10305         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10306                 get_stat64(&hw_stats->tx_discards);
10307         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10308                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10309
10310         stats->rx_crc_errors = old_stats->rx_crc_errors +
10311                 tg3_calc_crc_errors(tp);
10312
10313         stats->rx_missed_errors = old_stats->rx_missed_errors +
10314                 get_stat64(&hw_stats->rx_discards);
10315
10316         stats->rx_dropped = tp->rx_dropped;
10317         stats->tx_dropped = tp->tx_dropped;
10318 }
10319
10320 static int tg3_get_regs_len(struct net_device *dev)
10321 {
10322         return TG3_REG_BLK_SIZE;
10323 }
10324
10325 static void tg3_get_regs(struct net_device *dev,
10326                 struct ethtool_regs *regs, void *_p)
10327 {
10328         struct tg3 *tp = netdev_priv(dev);
10329
10330         regs->version = 0;
10331
10332         memset(_p, 0, TG3_REG_BLK_SIZE);
10333
10334         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10335                 return;
10336
10337         tg3_full_lock(tp, 0);
10338
10339         tg3_dump_legacy_regs(tp, (u32 *)_p);
10340
10341         tg3_full_unlock(tp);
10342 }
10343
10344 static int tg3_get_eeprom_len(struct net_device *dev)
10345 {
10346         struct tg3 *tp = netdev_priv(dev);
10347
10348         return tp->nvram_size;
10349 }
10350
10351 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10352 {
10353         struct tg3 *tp = netdev_priv(dev);
10354         int ret;
10355         u8  *pd;
10356         u32 i, offset, len, b_offset, b_count;
10357         __be32 val;
10358
10359         if (tg3_flag(tp, NO_NVRAM))
10360                 return -EINVAL;
10361
10362         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10363                 return -EAGAIN;
10364
10365         offset = eeprom->offset;
10366         len = eeprom->len;
10367         eeprom->len = 0;
10368
10369         eeprom->magic = TG3_EEPROM_MAGIC;
10370
10371         if (offset & 3) {
10372                 /* adjustments to start on required 4 byte boundary */
10373                 b_offset = offset & 3;
10374                 b_count = 4 - b_offset;
10375                 if (b_count > len) {
10376                         /* i.e. offset=1 len=2 */
10377                         b_count = len;
10378                 }
10379                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10380                 if (ret)
10381                         return ret;
10382                 memcpy(data, ((char *)&val) + b_offset, b_count);
10383                 len -= b_count;
10384                 offset += b_count;
10385                 eeprom->len += b_count;
10386         }
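        /* Worked example (hypothetical request): offset=6, len=10.  The
         * block above copies the last 2 bytes of the word at offset 4,
         * the loop below reads the aligned words at offsets 8 and 12,
         * and no unaligned tail is left for the final block.
         */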
10387
10388         /* read bytes up to the last 4 byte boundary */
10389         pd = &data[eeprom->len];
10390         for (i = 0; i < (len - (len & 3)); i += 4) {
10391                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10392                 if (ret) {
10393                         eeprom->len += i;
10394                         return ret;
10395                 }
10396                 memcpy(pd + i, &val, 4);
10397         }
10398         eeprom->len += i;
10399
10400         if (len & 3) {
10401                 /* read last bytes not ending on 4 byte boundary */
10402                 pd = &data[eeprom->len];
10403                 b_count = len & 3;
10404                 b_offset = offset + len - b_count;
10405                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10406                 if (ret)
10407                         return ret;
10408                 memcpy(pd, &val, b_count);
10409                 eeprom->len += b_count;
10410         }
10411         return 0;
10412 }
10413
10414 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10415 {
10416         struct tg3 *tp = netdev_priv(dev);
10417         int ret;
10418         u32 offset, len, b_offset, odd_len;
10419         u8 *buf;
10420         __be32 start, end;
10421
10422         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10423                 return -EAGAIN;
10424
10425         if (tg3_flag(tp, NO_NVRAM) ||
10426             eeprom->magic != TG3_EEPROM_MAGIC)
10427                 return -EINVAL;
10428
10429         offset = eeprom->offset;
10430         len = eeprom->len;
10431
10432         if ((b_offset = (offset & 3))) {
10433                 /* adjustments to start on required 4 byte boundary */
10434                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10435                 if (ret)
10436                         return ret;
10437                 len += b_offset;
10438                 offset &= ~3;
10439                 if (len < 4)
10440                         len = 4;
10441         }
10442
10443         odd_len = 0;
10444         if (len & 3) {
10445                 /* adjustments to end on required 4 byte boundary */
10446                 odd_len = 1;
10447                 len = (len + 3) & ~3;
10448                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10449                 if (ret)
10450                         return ret;
10451         }
10452
10453         buf = data;
10454         if (b_offset || odd_len) {
10455                 buf = kmalloc(len, GFP_KERNEL);
10456                 if (!buf)
10457                         return -ENOMEM;
10458                 if (b_offset)
10459                         memcpy(buf, &start, 4);
10460                 if (odd_len)
10461                         memcpy(buf+len-4, &end, 4);
10462                 memcpy(buf + b_offset, data, eeprom->len);
10463         }
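        /* Worked example (hypothetical request): offset=5, len=6 becomes
         * an aligned 8-byte write at offset 4: buf is seeded with the
         * "start" word and the "end" word, then the caller's 6 bytes are
         * laid over the middle at buf + 1.
         */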
10464
10465         ret = tg3_nvram_write_block(tp, offset, len, buf);
10466
10467         if (buf != data)
10468                 kfree(buf);
10469
10470         return ret;
10471 }
10472
10473 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10474 {
10475         struct tg3 *tp = netdev_priv(dev);
10476
10477         if (tg3_flag(tp, USE_PHYLIB)) {
10478                 struct phy_device *phydev;
10479                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10480                         return -EAGAIN;
10481                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10482                 return phy_ethtool_gset(phydev, cmd);
10483         }
10484
10485         cmd->supported = (SUPPORTED_Autoneg);
10486
10487         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10488                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10489                                    SUPPORTED_1000baseT_Full);
10490
10491         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10492                 cmd->supported |= (SUPPORTED_100baseT_Half |
10493                                   SUPPORTED_100baseT_Full |
10494                                   SUPPORTED_10baseT_Half |
10495                                   SUPPORTED_10baseT_Full |
10496                                   SUPPORTED_TP);
10497                 cmd->port = PORT_TP;
10498         } else {
10499                 cmd->supported |= SUPPORTED_FIBRE;
10500                 cmd->port = PORT_FIBRE;
10501         }
10502
10503         cmd->advertising = tp->link_config.advertising;
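        /* Standard pause advertisement encoding: rx+tx pause maps to
         * Pause, rx-only to Pause|Asym_Pause, tx-only to Asym_Pause.
         */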
10504         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10505                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10506                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10507                                 cmd->advertising |= ADVERTISED_Pause;
10508                         } else {
10509                                 cmd->advertising |= ADVERTISED_Pause |
10510                                                     ADVERTISED_Asym_Pause;
10511                         }
10512                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10513                         cmd->advertising |= ADVERTISED_Asym_Pause;
10514                 }
10515         }
10516         if (netif_running(dev) && netif_carrier_ok(dev)) {
10517                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10518                 cmd->duplex = tp->link_config.active_duplex;
10519                 cmd->lp_advertising = tp->link_config.rmt_adv;
10520                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10521                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10522                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10523                         else
10524                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10525                 }
10526         } else {
10527                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10528                 cmd->duplex = DUPLEX_UNKNOWN;
10529                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10530         }
10531         cmd->phy_address = tp->phy_addr;
10532         cmd->transceiver = XCVR_INTERNAL;
10533         cmd->autoneg = tp->link_config.autoneg;
10534         cmd->maxtxpkt = 0;
10535         cmd->maxrxpkt = 0;
10536         return 0;
10537 }
10538
10539 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10540 {
10541         struct tg3 *tp = netdev_priv(dev);
10542         u32 speed = ethtool_cmd_speed(cmd);
10543
10544         if (tg3_flag(tp, USE_PHYLIB)) {
10545                 struct phy_device *phydev;
10546                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10547                         return -EAGAIN;
10548                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10549                 return phy_ethtool_sset(phydev, cmd);
10550         }
10551
10552         if (cmd->autoneg != AUTONEG_ENABLE &&
10553             cmd->autoneg != AUTONEG_DISABLE)
10554                 return -EINVAL;
10555
10556         if (cmd->autoneg == AUTONEG_DISABLE &&
10557             cmd->duplex != DUPLEX_FULL &&
10558             cmd->duplex != DUPLEX_HALF)
10559                 return -EINVAL;
10560
10561         if (cmd->autoneg == AUTONEG_ENABLE) {
10562                 u32 mask = ADVERTISED_Autoneg |
10563                            ADVERTISED_Pause |
10564                            ADVERTISED_Asym_Pause;
10565
10566                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10567                         mask |= ADVERTISED_1000baseT_Half |
10568                                 ADVERTISED_1000baseT_Full;
10569
10570                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10571                         mask |= ADVERTISED_100baseT_Half |
10572                                 ADVERTISED_100baseT_Full |
10573                                 ADVERTISED_10baseT_Half |
10574                                 ADVERTISED_10baseT_Full |
10575                                 ADVERTISED_TP;
10576                 else
10577                         mask |= ADVERTISED_FIBRE;
10578
10579                 if (cmd->advertising & ~mask)
10580                         return -EINVAL;
10581
10582                 mask &= (ADVERTISED_1000baseT_Half |
10583                          ADVERTISED_1000baseT_Full |
10584                          ADVERTISED_100baseT_Half |
10585                          ADVERTISED_100baseT_Full |
10586                          ADVERTISED_10baseT_Half |
10587                          ADVERTISED_10baseT_Full);
10588
10589                 cmd->advertising &= mask;
10590         } else {
10591                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10592                         if (speed != SPEED_1000)
10593                                 return -EINVAL;
10594
10595                         if (cmd->duplex != DUPLEX_FULL)
10596                                 return -EINVAL;
10597                 } else {
10598                         if (speed != SPEED_100 &&
10599                             speed != SPEED_10)
10600                                 return -EINVAL;
10601                 }
10602         }
10603
10604         tg3_full_lock(tp, 0);
10605
10606         tp->link_config.autoneg = cmd->autoneg;
10607         if (cmd->autoneg == AUTONEG_ENABLE) {
10608                 tp->link_config.advertising = (cmd->advertising |
10609                                               ADVERTISED_Autoneg);
10610                 tp->link_config.speed = SPEED_UNKNOWN;
10611                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10612         } else {
10613                 tp->link_config.advertising = 0;
10614                 tp->link_config.speed = speed;
10615                 tp->link_config.duplex = cmd->duplex;
10616         }
10617
10618         if (netif_running(dev))
10619                 tg3_setup_phy(tp, 1);
10620
10621         tg3_full_unlock(tp);
10622
10623         return 0;
10624 }
10625
10626 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10627 {
10628         struct tg3 *tp = netdev_priv(dev);
10629
10630         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10631         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10632         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10633         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10634 }
10635
10636 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10637 {
10638         struct tg3 *tp = netdev_priv(dev);
10639
10640         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10641                 wol->supported = WAKE_MAGIC;
10642         else
10643                 wol->supported = 0;
10644         wol->wolopts = 0;
10645         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10646                 wol->wolopts = WAKE_MAGIC;
10647         memset(&wol->sopass, 0, sizeof(wol->sopass));
10648 }
10649
10650 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10651 {
10652         struct tg3 *tp = netdev_priv(dev);
10653         struct device *dp = &tp->pdev->dev;
10654
10655         if (wol->wolopts & ~WAKE_MAGIC)
10656                 return -EINVAL;
10657         if ((wol->wolopts & WAKE_MAGIC) &&
10658             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10659                 return -EINVAL;
10660
10661         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10662
10663         spin_lock_bh(&tp->lock);
10664         if (device_may_wakeup(dp))
10665                 tg3_flag_set(tp, WOL_ENABLE);
10666         else
10667                 tg3_flag_clear(tp, WOL_ENABLE);
10668         spin_unlock_bh(&tp->lock);
10669
10670         return 0;
10671 }
10672
10673 static u32 tg3_get_msglevel(struct net_device *dev)
10674 {
10675         struct tg3 *tp = netdev_priv(dev);
10676         return tp->msg_enable;
10677 }
10678
10679 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10680 {
10681         struct tg3 *tp = netdev_priv(dev);
10682         tp->msg_enable = value;
10683 }
10684
10685 static int tg3_nway_reset(struct net_device *dev)
10686 {
10687         struct tg3 *tp = netdev_priv(dev);
10688         int r;
10689
10690         if (!netif_running(dev))
10691                 return -EAGAIN;
10692
10693         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10694                 return -EINVAL;
10695
10696         if (tg3_flag(tp, USE_PHYLIB)) {
10697                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10698                         return -EAGAIN;
10699                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10700         } else {
10701                 u32 bmcr;
10702
10703                 spin_lock_bh(&tp->lock);
10704                 r = -EINVAL;
10705                 tg3_readphy(tp, MII_BMCR, &bmcr);
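                /* Note that BMCR is read twice here; only the second,
                 * checked read's result is actually used.
                 */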
10706                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10707                     ((bmcr & BMCR_ANENABLE) ||
10708                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10709                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10710                                                    BMCR_ANENABLE);
10711                         r = 0;
10712                 }
10713                 spin_unlock_bh(&tp->lock);
10714         }
10715
10716         return r;
10717 }
10718
10719 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10720 {
10721         struct tg3 *tp = netdev_priv(dev);
10722
10723         ering->rx_max_pending = tp->rx_std_ring_mask;
10724         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10725                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10726         else
10727                 ering->rx_jumbo_max_pending = 0;
10728
10729         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10730
10731         ering->rx_pending = tp->rx_pending;
10732         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10733                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10734         else
10735                 ering->rx_jumbo_pending = 0;
10736
10737         ering->tx_pending = tp->napi[0].tx_pending;
10738 }
10739
10740 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10741 {
10742         struct tg3 *tp = netdev_priv(dev);
10743         int i, irq_sync = 0, err = 0;
10744
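        /* Sanity-check the requested sizes.  The tx ring must at least
         * hold one maximally-fragmented skb (MAX_SKB_FRAGS descriptors),
         * with 3x headroom on TSO_BUG chips, whose TSO workaround can
         * emit several segments per skb.
         */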
10745         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10746             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10747             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10748             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10749             (tg3_flag(tp, TSO_BUG) &&
10750              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10751                 return -EINVAL;
10752
10753         if (netif_running(dev)) {
10754                 tg3_phy_stop(tp);
10755                 tg3_netif_stop(tp);
10756                 irq_sync = 1;
10757         }
10758
10759         tg3_full_lock(tp, irq_sync);
10760
10761         tp->rx_pending = ering->rx_pending;
10762
10763         if (tg3_flag(tp, MAX_RXPEND_64) &&
10764             tp->rx_pending > 63)
10765                 tp->rx_pending = 63;
10766         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10767
10768         for (i = 0; i < tp->irq_max; i++)
10769                 tp->napi[i].tx_pending = ering->tx_pending;
10770
10771         if (netif_running(dev)) {
10772                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10773                 err = tg3_restart_hw(tp, 1);
10774                 if (!err)
10775                         tg3_netif_start(tp);
10776         }
10777
10778         tg3_full_unlock(tp);
10779
10780         if (irq_sync && !err)
10781                 tg3_phy_start(tp);
10782
10783         return err;
10784 }
10785
10786 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10787 {
10788         struct tg3 *tp = netdev_priv(dev);
10789
10790         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10791
10792         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10793                 epause->rx_pause = 1;
10794         else
10795                 epause->rx_pause = 0;
10796
10797         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10798                 epause->tx_pause = 1;
10799         else
10800                 epause->tx_pause = 0;
10801 }
10802
10803 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10804 {
10805         struct tg3 *tp = netdev_priv(dev);
10806         int err = 0;
10807
10808         if (tg3_flag(tp, USE_PHYLIB)) {
10809                 u32 newadv;
10810                 struct phy_device *phydev;
10811
10812                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10813
10814                 if (!(phydev->supported & SUPPORTED_Pause) ||
10815                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10816                      (epause->rx_pause != epause->tx_pause)))
10817                         return -EINVAL;
10818
10819                 tp->link_config.flowctrl = 0;
10820                 if (epause->rx_pause) {
10821                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10822
10823                         if (epause->tx_pause) {
10824                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10825                                 newadv = ADVERTISED_Pause;
10826                         } else
10827                                 newadv = ADVERTISED_Pause |
10828                                          ADVERTISED_Asym_Pause;
10829                 } else if (epause->tx_pause) {
10830                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10831                         newadv = ADVERTISED_Asym_Pause;
10832                 } else
10833                         newadv = 0;
10834
10835                 if (epause->autoneg)
10836                         tg3_flag_set(tp, PAUSE_AUTONEG);
10837                 else
10838                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10839
10840                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10841                         u32 oldadv = phydev->advertising &
10842                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10843                         if (oldadv != newadv) {
10844                                 phydev->advertising &=
10845                                         ~(ADVERTISED_Pause |
10846                                           ADVERTISED_Asym_Pause);
10847                                 phydev->advertising |= newadv;
10848                                 if (phydev->autoneg) {
10849                                         /*
10850                                          * Always renegotiate the link to
10851                                          * inform our link partner of our
10852                                          * flow control settings, even if the
10853                                          * flow control is forced.  Let
10854                                          * tg3_adjust_link() do the final
10855                                          * flow control setup.
10856                                          */
10857                                         return phy_start_aneg(phydev);
10858                                 }
10859                         }
10860
10861                         if (!epause->autoneg)
10862                                 tg3_setup_flow_control(tp, 0, 0);
10863                 } else {
10864                         tp->link_config.advertising &=
10865                                         ~(ADVERTISED_Pause |
10866                                           ADVERTISED_Asym_Pause);
10867                         tp->link_config.advertising |= newadv;
10868                 }
10869         } else {
10870                 int irq_sync = 0;
10871
10872                 if (netif_running(dev)) {
10873                         tg3_netif_stop(tp);
10874                         irq_sync = 1;
10875                 }
10876
10877                 tg3_full_lock(tp, irq_sync);
10878
10879                 if (epause->autoneg)
10880                         tg3_flag_set(tp, PAUSE_AUTONEG);
10881                 else
10882                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10883                 if (epause->rx_pause)
10884                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10885                 else
10886                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10887                 if (epause->tx_pause)
10888                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10889                 else
10890                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10891
10892                 if (netif_running(dev)) {
10893                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10894                         err = tg3_restart_hw(tp, 1);
10895                         if (!err)
10896                                 tg3_netif_start(tp);
10897                 }
10898
10899                 tg3_full_unlock(tp);
10900         }
10901
10902         return err;
10903 }
10904
10905 static int tg3_get_sset_count(struct net_device *dev, int sset)
10906 {
10907         switch (sset) {
10908         case ETH_SS_TEST:
10909                 return TG3_NUM_TEST;
10910         case ETH_SS_STATS:
10911                 return TG3_NUM_STATS;
10912         default:
10913                 return -EOPNOTSUPP;
10914         }
10915 }
10916
10917 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10918                          u32 *rules __always_unused)
10919 {
10920         struct tg3 *tp = netdev_priv(dev);
10921
10922         if (!tg3_flag(tp, SUPPORT_MSIX))
10923                 return -EOPNOTSUPP;
10924
10925         switch (info->cmd) {
10926         case ETHTOOL_GRXRINGS:
10927                 if (netif_running(tp->dev))
10928                         info->data = tp->irq_cnt;
10929                 else {
10930                         info->data = num_online_cpus();
10931                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10932                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10933                 }
10934
10935                 /* The first interrupt vector only
10936                  * handles link interrupts.
10937                  */
10938                 info->data -= 1;
10939                 return 0;
10940
10941         default:
10942                 return -EOPNOTSUPP;
10943         }
10944 }
10945
10946 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10947 {
10948         u32 size = 0;
10949         struct tg3 *tp = netdev_priv(dev);
10950
10951         if (tg3_flag(tp, SUPPORT_MSIX))
10952                 size = TG3_RSS_INDIR_TBL_SIZE;
10953
10954         return size;
10955 }
10956
10957 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10958 {
10959         struct tg3 *tp = netdev_priv(dev);
10960         int i;
10961
10962         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10963                 indir[i] = tp->rss_ind_tbl[i];
10964
10965         return 0;
10966 }
10967
10968 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10969 {
10970         struct tg3 *tp = netdev_priv(dev);
10971         size_t i;
10972
10973         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10974                 tp->rss_ind_tbl[i] = indir[i];
10975
10976         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10977                 return 0;
10978
10979         /* It is legal to write the indirection
10980          * table while the device is running.
10981          */
10982         tg3_full_lock(tp, 0);
10983         tg3_rss_write_indir_tbl(tp);
10984         tg3_full_unlock(tp);
10985
10986         return 0;
10987 }
10988
10989 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10990 {
10991         switch (stringset) {
10992         case ETH_SS_STATS:
10993                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10994                 break;
10995         case ETH_SS_TEST:
10996                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10997                 break;
10998         default:
10999                 WARN_ON(1);     /* should really be a WARN() with a message */
11000                 break;
11001         }
11002 }
11003
11004 static int tg3_set_phys_id(struct net_device *dev,
11005                             enum ethtool_phys_id_state state)
11006 {
11007         struct tg3 *tp = netdev_priv(dev);
11008
11009         if (!netif_running(tp->dev))
11010                 return -EAGAIN;
11011
11012         switch (state) {
11013         case ETHTOOL_ID_ACTIVE:
11014                 return 1;       /* cycle on/off once per second */
11015
11016         case ETHTOOL_ID_ON:
11017                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11018                      LED_CTRL_1000MBPS_ON |
11019                      LED_CTRL_100MBPS_ON |
11020                      LED_CTRL_10MBPS_ON |
11021                      LED_CTRL_TRAFFIC_OVERRIDE |
11022                      LED_CTRL_TRAFFIC_BLINK |
11023                      LED_CTRL_TRAFFIC_LED);
11024                 break;
11025
11026         case ETHTOOL_ID_OFF:
11027                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11028                      LED_CTRL_TRAFFIC_OVERRIDE);
11029                 break;
11030
11031         case ETHTOOL_ID_INACTIVE:
11032                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11033                 break;
11034         }
11035
11036         return 0;
11037 }
11038
11039 static void tg3_get_ethtool_stats(struct net_device *dev,
11040                                    struct ethtool_stats *estats, u64 *tmp_stats)
11041 {
11042         struct tg3 *tp = netdev_priv(dev);
11043
11044         if (tp->hw_stats)
11045                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11046         else
11047                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11048 }
11049
11050 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11051 {
11052         int i;
11053         __be32 *buf;
11054         u32 offset = 0, len = 0;
11055         u32 magic, val;
11056
11057         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11058                 return NULL;
11059
11060         if (magic == TG3_EEPROM_MAGIC) {
11061                 for (offset = TG3_NVM_DIR_START;
11062                      offset < TG3_NVM_DIR_END;
11063                      offset += TG3_NVM_DIRENT_SIZE) {
11064                         if (tg3_nvram_read(tp, offset, &val))
11065                                 return NULL;
11066
11067                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11068                             TG3_NVM_DIRTYPE_EXTVPD)
11069                                 break;
11070                 }
11071
11072                 if (offset != TG3_NVM_DIR_END) {
11073                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11074                         if (tg3_nvram_read(tp, offset + 4, &offset))
11075                                 return NULL;
11076
11077                         offset = tg3_nvram_logical_addr(tp, offset);
11078                 }
11079         }
11080
11081         if (!offset || !len) {
11082                 offset = TG3_NVM_VPD_OFF;
11083                 len = TG3_NVM_VPD_LEN;
11084         }
11085
11086         buf = kmalloc(len, GFP_KERNEL);
11087         if (buf == NULL)
11088                 return NULL;
11089
11090         if (magic == TG3_EEPROM_MAGIC) {
11091                 for (i = 0; i < len; i += 4) {
11092                         /* The data is in little-endian format in NVRAM.
11093                          * Use the big-endian read routines to preserve
11094                          * the byte order as it exists in NVRAM.
11095                          */
11096                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11097                                 goto error;
11098                 }
11099         } else {
11100                 u8 *ptr;
11101                 ssize_t cnt;
11102                 unsigned int pos = 0;
11103
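                /* At most three read attempts; -ETIMEDOUT and -EINTR are
                 * treated as "read nothing, try again", any other error
                 * aborts, and a short total is also an error.
                 */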
11104                 ptr = (u8 *)&buf[0];
11105                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11106                         cnt = pci_read_vpd(tp->pdev, pos,
11107                                            len - pos, ptr);
11108                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11109                                 cnt = 0;
11110                         else if (cnt < 0)
11111                                 goto error;
11112                 }
11113                 if (pos != len)
11114                         goto error;
11115         }
11116
11117         *vpdlen = len;
11118
11119         return buf;
11120
11121 error:
11122         kfree(buf);
11123         return NULL;
11124 }
11125
11126 #define NVRAM_TEST_SIZE 0x100
11127 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11128 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11129 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11130 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11131 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11132 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11133 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11134 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11135
11136 static int tg3_test_nvram(struct tg3 *tp)
11137 {
11138         u32 csum, magic, len;
11139         __be32 *buf;
11140         int i, j, k, err = 0, size;
11141
11142         if (tg3_flag(tp, NO_NVRAM))
11143                 return 0;
11144
11145         if (tg3_nvram_read(tp, 0, &magic) != 0)
11146                 return -EIO;
11147
11148         if (magic == TG3_EEPROM_MAGIC)
11149                 size = NVRAM_TEST_SIZE;
11150         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11151                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11152                     TG3_EEPROM_SB_FORMAT_1) {
11153                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11154                         case TG3_EEPROM_SB_REVISION_0:
11155                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11156                                 break;
11157                         case TG3_EEPROM_SB_REVISION_2:
11158                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11159                                 break;
11160                         case TG3_EEPROM_SB_REVISION_3:
11161                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11162                                 break;
11163                         case TG3_EEPROM_SB_REVISION_4:
11164                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11165                                 break;
11166                         case TG3_EEPROM_SB_REVISION_5:
11167                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11168                                 break;
11169                         case TG3_EEPROM_SB_REVISION_6:
11170                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11171                                 break;
11172                         default:
11173                                 return -EIO;
11174                         }
11175                 } else
11176                         return 0;
11177         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11178                 size = NVRAM_SELFBOOT_HW_SIZE;
11179         else
11180                 return -EIO;
11181
11182         buf = kmalloc(size, GFP_KERNEL);
11183         if (buf == NULL)
11184                 return -ENOMEM;
11185
11186         err = -EIO;
11187         for (i = 0, j = 0; i < size; i += 4, j++) {
11188                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11189                 if (err)
11190                         break;
11191         }
11192         if (i < size)
11193                 goto out;
11194
11195         /* Selfboot format */
11196         magic = be32_to_cpu(buf[0]);
11197         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11198             TG3_EEPROM_MAGIC_FW) {
11199                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11200
11201                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11202                     TG3_EEPROM_SB_REVISION_2) {
11203                         /* For rev 2, the csum doesn't include the MBA. */
11204                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11205                                 csum8 += buf8[i];
11206                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11207                                 csum8 += buf8[i];
11208                 } else {
11209                         for (i = 0; i < size; i++)
11210                                 csum8 += buf8[i];
11211                 }
11212
11213                 if (csum8 == 0) {
11214                         err = 0;
11215                         goto out;
11216                 }
11217
11218                 err = -EIO;
11219                 goto out;
11220         }
11221
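        /* Hardware selfboot images are 0x20 bytes: bytes 0, 8, 16 and 17
         * carry parity bits covering the remaining 0x1c data bytes.  A
         * valid image has an odd number of set bits in each data byte
         * plus its parity bit.
         */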
11222         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11223             TG3_EEPROM_MAGIC_HW) {
11224                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11225                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11226                 u8 *buf8 = (u8 *) buf;
11227
11228                 /* Separate the parity bits and the data bytes.  */
11229                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11230                         if ((i == 0) || (i == 8)) {
11231                                 int l;
11232                                 u8 msk;
11233
11234                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11235                                         parity[k++] = buf8[i] & msk;
11236                                 i++;
11237                         } else if (i == 16) {
11238                                 int l;
11239                                 u8 msk;
11240
11241                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11242                                         parity[k++] = buf8[i] & msk;
11243                                 i++;
11244
11245                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11246                                         parity[k++] = buf8[i] & msk;
11247                                 i++;
11248                         }
11249                         data[j++] = buf8[i];
11250                 }
11251
11252                 err = -EIO;
11253                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11254                         u8 hw8 = hweight8(data[i]);
11255
11256                         if ((hw8 & 0x1) && parity[i])
11257                                 goto out;
11258                         else if (!(hw8 & 0x1) && !parity[i])
11259                                 goto out;
11260                 }
11261                 err = 0;
11262                 goto out;
11263         }
11264
11265         err = -EIO;
11266
11267         /* Bootstrap checksum at offset 0x10 */
11268         csum = calc_crc((unsigned char *) buf, 0x10);
11269         if (csum != le32_to_cpu(buf[0x10/4]))
11270                 goto out;
11271
11272         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11273         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11274         if (csum != le32_to_cpu(buf[0xfc/4]))
11275                 goto out;
11276
11277         kfree(buf);
11278
11279         buf = tg3_vpd_readblock(tp, &len);
11280         if (!buf)
11281                 return -ENOMEM;
11282
11283         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11284         if (i > 0) {
11285                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11286                 if (j < 0)
11287                         goto out;
11288
11289                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11290                         goto out;
11291
11292                 i += PCI_VPD_LRDT_TAG_SIZE;
11293                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11294                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11295                 if (j > 0) {
11296                         u8 csum8 = 0;
11297
11298                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11299
11300                         for (i = 0; i <= j; i++)
11301                                 csum8 += ((u8 *)buf)[i];
11302
11303                         if (csum8)
11304                                 goto out;
11305                 }
11306         }
11307
11308         err = 0;
11309
11310 out:
11311         kfree(buf);
11312         return err;
11313 }
11314
11315 #define TG3_SERDES_TIMEOUT_SEC  2
11316 #define TG3_COPPER_TIMEOUT_SEC  6
11317
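/* The link test simply polls the carrier state once per second, giving
 * serdes links 2 seconds and copper links 6 seconds to come up.
 */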
11318 static int tg3_test_link(struct tg3 *tp)
11319 {
11320         int i, max;
11321
11322         if (!netif_running(tp->dev))
11323                 return -ENODEV;
11324
11325         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11326                 max = TG3_SERDES_TIMEOUT_SEC;
11327         else
11328                 max = TG3_COPPER_TIMEOUT_SEC;
11329
11330         for (i = 0; i < max; i++) {
11331                 if (netif_carrier_ok(tp->dev))
11332                         return 0;
11333
11334                 if (msleep_interruptible(1000))
11335                         break;
11336         }
11337
11338         return -EIO;
11339 }
11340
11341 /* Only test the commonly used registers */
11342 static int tg3_test_registers(struct tg3 *tp)
11343 {
11344         int i, is_5705, is_5750;
11345         u32 offset, read_mask, write_mask, val, save_val, read_val;
11346         static struct {
11347                 u16 offset;
11348                 u16 flags;
11349 #define TG3_FL_5705     0x1
11350 #define TG3_FL_NOT_5705 0x2
11351 #define TG3_FL_NOT_5788 0x4
11352 #define TG3_FL_NOT_5750 0x8
11353                 u32 read_mask;
11354                 u32 write_mask;
11355         } reg_tbl[] = {
11356                 /* MAC Control Registers */
11357                 { MAC_MODE, TG3_FL_NOT_5705,
11358                         0x00000000, 0x00ef6f8c },
11359                 { MAC_MODE, TG3_FL_5705,
11360                         0x00000000, 0x01ef6b8c },
11361                 { MAC_STATUS, TG3_FL_NOT_5705,
11362                         0x03800107, 0x00000000 },
11363                 { MAC_STATUS, TG3_FL_5705,
11364                         0x03800100, 0x00000000 },
11365                 { MAC_ADDR_0_HIGH, 0x0000,
11366                         0x00000000, 0x0000ffff },
11367                 { MAC_ADDR_0_LOW, 0x0000,
11368                         0x00000000, 0xffffffff },
11369                 { MAC_RX_MTU_SIZE, 0x0000,
11370                         0x00000000, 0x0000ffff },
11371                 { MAC_TX_MODE, 0x0000,
11372                         0x00000000, 0x00000070 },
11373                 { MAC_TX_LENGTHS, 0x0000,
11374                         0x00000000, 0x00003fff },
11375                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11376                         0x00000000, 0x000007fc },
11377                 { MAC_RX_MODE, TG3_FL_5705,
11378                         0x00000000, 0x000007dc },
11379                 { MAC_HASH_REG_0, 0x0000,
11380                         0x00000000, 0xffffffff },
11381                 { MAC_HASH_REG_1, 0x0000,
11382                         0x00000000, 0xffffffff },
11383                 { MAC_HASH_REG_2, 0x0000,
11384                         0x00000000, 0xffffffff },
11385                 { MAC_HASH_REG_3, 0x0000,
11386                         0x00000000, 0xffffffff },
11387
11388                 /* Receive Data and Receive BD Initiator Control Registers. */
11389                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11390                         0x00000000, 0xffffffff },
11391                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11392                         0x00000000, 0xffffffff },
11393                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11394                         0x00000000, 0x00000003 },
11395                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11396                         0x00000000, 0xffffffff },
11397                 { RCVDBDI_STD_BD+0, 0x0000,
11398                         0x00000000, 0xffffffff },
11399                 { RCVDBDI_STD_BD+4, 0x0000,
11400                         0x00000000, 0xffffffff },
11401                 { RCVDBDI_STD_BD+8, 0x0000,
11402                         0x00000000, 0xffff0002 },
11403                 { RCVDBDI_STD_BD+0xc, 0x0000,
11404                         0x00000000, 0xffffffff },
11405
11406                 /* Receive BD Initiator Control Registers. */
11407                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11408                         0x00000000, 0xffffffff },
11409                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11410                         0x00000000, 0x000003ff },
11411                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11412                         0x00000000, 0xffffffff },
11413
11414                 /* Host Coalescing Control Registers. */
11415                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11416                         0x00000000, 0x00000004 },
11417                 { HOSTCC_MODE, TG3_FL_5705,
11418                         0x00000000, 0x000000f6 },
11419                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11420                         0x00000000, 0xffffffff },
11421                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11422                         0x00000000, 0x000003ff },
11423                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11424                         0x00000000, 0xffffffff },
11425                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11426                         0x00000000, 0x000003ff },
11427                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11428                         0x00000000, 0xffffffff },
11429                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11430                         0x00000000, 0x000000ff },
11431                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11432                         0x00000000, 0xffffffff },
11433                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11434                         0x00000000, 0x000000ff },
11435                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11436                         0x00000000, 0xffffffff },
11437                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11438                         0x00000000, 0xffffffff },
11439                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11440                         0x00000000, 0xffffffff },
11441                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11442                         0x00000000, 0x000000ff },
11443                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11444                         0x00000000, 0xffffffff },
11445                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11446                         0x00000000, 0x000000ff },
11447                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11448                         0x00000000, 0xffffffff },
11449                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11450                         0x00000000, 0xffffffff },
11451                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11452                         0x00000000, 0xffffffff },
11453                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11454                         0x00000000, 0xffffffff },
11455                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11456                         0x00000000, 0xffffffff },
11457                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11458                         0xffffffff, 0x00000000 },
11459                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11460                         0xffffffff, 0x00000000 },
11461
11462                 /* Buffer Manager Control Registers. */
11463                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11464                         0x00000000, 0x007fff80 },
11465                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11466                         0x00000000, 0x007fffff },
11467                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11468                         0x00000000, 0x0000003f },
11469                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11470                         0x00000000, 0x000001ff },
11471                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11472                         0x00000000, 0x000001ff },
11473                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11474                         0xffffffff, 0x00000000 },
11475                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11476                         0xffffffff, 0x00000000 },
11477
11478                 /* Mailbox Registers */
11479                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11480                         0x00000000, 0x000001ff },
11481                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11482                         0x00000000, 0x000001ff },
11483                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11484                         0x00000000, 0x000007ff },
11485                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11486                         0x00000000, 0x000001ff },
11487
11488                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11489         };
11490
11491         is_5705 = is_5750 = 0;
11492         if (tg3_flag(tp, 5705_PLUS)) {
11493                 is_5705 = 1;
11494                 if (tg3_flag(tp, 5750_PLUS))
11495                         is_5750 = 1;
11496         }
11497
11498         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11499                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11500                         continue;
11501
11502                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11503                         continue;
11504
11505                 if (tg3_flag(tp, IS_5788) &&
11506                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11507                         continue;
11508
11509                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11510                         continue;
11511
11512                 offset = (u32) reg_tbl[i].offset;
11513                 read_mask = reg_tbl[i].read_mask;
11514                 write_mask = reg_tbl[i].write_mask;
11515
11516                 /* Save the original register content */
11517                 save_val = tr32(offset);
11518
11519                 /* Determine the read-only value. */
11520                 read_val = save_val & read_mask;
11521
11522                 /* Write zero to the register, then make sure the read-only bits
11523                  * are not changed and the read/write bits are all zeros.
11524                  */
11525                 tw32(offset, 0);
11526
11527                 val = tr32(offset);
11528
11529                 /* Test the read-only and read/write bits. */
11530                 if (((val & read_mask) != read_val) || (val & write_mask))
11531                         goto out;
11532
11533                 /* Write ones to all the bits defined by RdMask and WrMask, then
11534                  * make sure the read-only bits are not changed and the
11535                  * read/write bits are all ones.
11536                  */
11537                 tw32(offset, read_mask | write_mask);
11538
11539                 val = tr32(offset);
11540
11541                 /* Test the read-only bits. */
11542                 if ((val & read_mask) != read_val)
11543                         goto out;
11544
11545                 /* Test the read/write bits. */
11546                 if ((val & write_mask) != write_mask)
11547                         goto out;
11548
11549                 tw32(offset, save_val);
11550         }
11551
11552         return 0;
11553
11554 out:
11555         if (netif_msg_hw(tp))
11556                 netdev_err(tp->dev,
11557                            "Register test failed at offset %x\n", offset);
11558         tw32(offset, save_val);
11559         return -EIO;
11560 }
11561
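/* Walk a memory region 32 bits at a time, writing each test pattern
 * (all zeros, all ones, and a mixed 0xaa55a55a word) and verifying the
 * readback.
 */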
11562 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11563 {
11564         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11565         int i;
11566         u32 j;
11567
11568         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11569                 for (j = 0; j < len; j += 4) {
11570                         u32 val;
11571
11572                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11573                         tg3_read_mem(tp, offset + j, &val);
11574                         if (val != test_pattern[i])
11575                                 return -EIO;
11576                 }
11577         }
11578         return 0;
11579 }
11580
11581 static int tg3_test_memory(struct tg3 *tp)
11582 {
11583         static struct mem_entry {
11584                 u32 offset;
11585                 u32 len;
11586         } mem_tbl_570x[] = {
11587                 { 0x00000000, 0x00b50},
11588                 { 0x00002000, 0x1c000},
11589                 { 0xffffffff, 0x00000}
11590         }, mem_tbl_5705[] = {
11591                 { 0x00000100, 0x0000c},
11592                 { 0x00000200, 0x00008},
11593                 { 0x00004000, 0x00800},
11594                 { 0x00006000, 0x01000},
11595                 { 0x00008000, 0x02000},
11596                 { 0x00010000, 0x0e000},
11597                 { 0xffffffff, 0x00000}
11598         }, mem_tbl_5755[] = {
11599                 { 0x00000200, 0x00008},
11600                 { 0x00004000, 0x00800},
11601                 { 0x00006000, 0x00800},
11602                 { 0x00008000, 0x02000},
11603                 { 0x00010000, 0x0c000},
11604                 { 0xffffffff, 0x00000}
11605         }, mem_tbl_5906[] = {
11606                 { 0x00000200, 0x00008},
11607                 { 0x00004000, 0x00400},
11608                 { 0x00006000, 0x00400},
11609                 { 0x00008000, 0x01000},
11610                 { 0x00010000, 0x01000},
11611                 { 0xffffffff, 0x00000}
11612         }, mem_tbl_5717[] = {
11613                 { 0x00000200, 0x00008},
11614                 { 0x00010000, 0x0a000},
11615                 { 0x00020000, 0x13c00},
11616                 { 0xffffffff, 0x00000}
11617         }, mem_tbl_57765[] = {
11618                 { 0x00000200, 0x00008},
11619                 { 0x00004000, 0x00800},
11620                 { 0x00006000, 0x09800},
11621                 { 0x00010000, 0x0a000},
11622                 { 0xffffffff, 0x00000}
11623         };
11624         struct mem_entry *mem_tbl;
11625         int err = 0;
11626         int i;
11627
11628         if (tg3_flag(tp, 5717_PLUS))
11629                 mem_tbl = mem_tbl_5717;
11630         else if (tg3_flag(tp, 57765_CLASS))
11631                 mem_tbl = mem_tbl_57765;
11632         else if (tg3_flag(tp, 5755_PLUS))
11633                 mem_tbl = mem_tbl_5755;
11634         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11635                 mem_tbl = mem_tbl_5906;
11636         else if (tg3_flag(tp, 5705_PLUS))
11637                 mem_tbl = mem_tbl_5705;
11638         else
11639                 mem_tbl = mem_tbl_570x;
11640
11641         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11642                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11643                 if (err)
11644                         break;
11645         }
11646
11647         return err;
11648 }
11649
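/* The TSO loopback test uses a small 500-byte MSS, presumably so that
 * even a standard-sized frame is segmented into several packets (three
 * for a 1514-byte frame).
 */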
11650 #define TG3_TSO_MSS             500
11651
11652 #define TG3_TSO_IP_HDR_LEN      20
11653 #define TG3_TSO_TCP_HDR_LEN     20
11654 #define TG3_TSO_TCP_OPT_LEN     12
11655
11656 static const u8 tg3_tso_header[] = {
11657 0x08, 0x00,
11658 0x45, 0x00, 0x00, 0x00,
11659 0x00, 0x00, 0x40, 0x00,
11660 0x40, 0x06, 0x00, 0x00,
11661 0x0a, 0x00, 0x00, 0x01,
11662 0x0a, 0x00, 0x00, 0x02,
11663 0x0d, 0x00, 0xe0, 0x00,
11664 0x00, 0x00, 0x01, 0x00,
11665 0x00, 0x00, 0x02, 0x00,
11666 0x80, 0x10, 0x10, 0x00,
11667 0x14, 0x09, 0x00, 0x00,
11668 0x01, 0x01, 0x08, 0x0a,
11669 0x11, 0x11, 0x11, 0x11,
11670 0x11, 0x11, 0x11, 0x11,
11671 };
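
/* Decoded, the template above is an ETH_P_IP ethertype, a 20-byte IPv4
 * header (DF set, TTL 64, protocol TCP, 10.0.0.1 -> 10.0.0.2, with
 * tot_len patched at run time) and a 32-byte TCP header carrying
 * 12 bytes of timestamp options.
 */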
11672
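/* Transmit one frame (or one TSO burst) addressed to our own MAC while
 * the device is in loopback, poll the status block until the tx
 * consumer and rx producer indices advance, then verify the received
 * payload byte-for-byte.
 */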
11673 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11674 {
11675         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11676         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11677         u32 budget;
11678         struct sk_buff *skb;
11679         u8 *tx_data, *rx_data;
11680         dma_addr_t map;
11681         int num_pkts, tx_len, rx_len, i, err;
11682         struct tg3_rx_buffer_desc *desc;
11683         struct tg3_napi *tnapi, *rnapi;
11684         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11685
11686         tnapi = &tp->napi[0];
11687         rnapi = &tp->napi[0];
11688         if (tp->irq_cnt > 1) {
11689                 if (tg3_flag(tp, ENABLE_RSS))
11690                         rnapi = &tp->napi[1];
11691                 if (tg3_flag(tp, ENABLE_TSS))
11692                         tnapi = &tp->napi[1];
11693         }
11694         coal_now = tnapi->coal_now | rnapi->coal_now;
11695
11696         err = -EIO;
11697
11698         tx_len = pktsz;
11699         skb = netdev_alloc_skb(tp->dev, tx_len);
11700         if (!skb)
11701                 return -ENOMEM;
11702
11703         tx_data = skb_put(skb, tx_len);
11704         memcpy(tx_data, tp->dev->dev_addr, 6);
11705         memset(tx_data + 6, 0x0, 8);
11706
11707         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11708
11709         if (tso_loopback) {
11710                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11711
11712                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11713                               TG3_TSO_TCP_OPT_LEN;
11714
11715                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11716                        sizeof(tg3_tso_header));
11717                 mss = TG3_TSO_MSS;
11718
11719                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11720                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11721
11722                 /* Set the total length field in the IP header */
11723                 iph->tot_len = htons((u16)(mss + hdr_len));
11724
11725                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11726                               TXD_FLAG_CPU_POST_DMA);
11727
11728                 if (tg3_flag(tp, HW_TSO_1) ||
11729                     tg3_flag(tp, HW_TSO_2) ||
11730                     tg3_flag(tp, HW_TSO_3)) {
11731                         struct tcphdr *th;
11732                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11733                         th = (struct tcphdr *)&tx_data[val];
11734                         th->check = 0;
11735                 } else
11736                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11737
11738                 if (tg3_flag(tp, HW_TSO_3)) {
11739                         mss |= (hdr_len & 0xc) << 12;
11740                         if (hdr_len & 0x10)
11741                                 base_flags |= 0x00000010;
11742                         base_flags |= (hdr_len & 0x3e0) << 5;
11743                 } else if (tg3_flag(tp, HW_TSO_2))
11744                         mss |= hdr_len << 9;
11745                 else if (tg3_flag(tp, HW_TSO_1) ||
11746                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11747                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11748                 } else {
11749                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11750                 }
11751
11752                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11753         } else {
11754                 num_pkts = 1;
11755                 data_off = ETH_HLEN;
11756
11757                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11758                     tx_len > VLAN_ETH_FRAME_LEN)
11759                         base_flags |= TXD_FLAG_JMB_PKT;
11760         }
11761
11762         for (i = data_off; i < tx_len; i++)
11763                 tx_data[i] = (u8) (i & 0xff);
11764
11765         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11766         if (pci_dma_mapping_error(tp->pdev, map)) {
11767                 dev_kfree_skb(skb);
11768                 return -EIO;
11769         }
11770
11771         val = tnapi->tx_prod;
11772         tnapi->tx_buffers[val].skb = skb;
11773         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11774
11775         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11776                rnapi->coal_now);
11777
11778         udelay(10);
11779
11780         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11781
11782         budget = tg3_tx_avail(tnapi);
11783         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11784                             base_flags | TXD_FLAG_END, mss, 0)) {
11785                 tnapi->tx_buffers[val].skb = NULL;
11786                 dev_kfree_skb(skb);
11787                 return -EIO;
11788         }
11789
11790         tnapi->tx_prod++;
11791
11792         /* Sync BD data before updating mailbox */
11793         wmb();
11794
11795         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11796         tr32_mailbox(tnapi->prodmbox);
11797
11798         udelay(10);
11799
11800         /* Poll for up to 350 usec to give slower 10/100 Mbps devices enough time. */
11801         for (i = 0; i < 35; i++) {
11802                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11803                        coal_now);
11804
11805                 udelay(10);
11806
11807                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11808                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11809                 if ((tx_idx == tnapi->tx_prod) &&
11810                     (rx_idx == (rx_start_idx + num_pkts)))
11811                         break;
11812         }
11813
11814         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11815         dev_kfree_skb(skb);
11816
11817         if (tx_idx != tnapi->tx_prod)
11818                 goto out;
11819
11820         if (rx_idx != rx_start_idx + num_pkts)
11821                 goto out;
11822
11823         val = data_off;
11824         while (rx_idx != rx_start_idx) {
11825                 desc = &rnapi->rx_rcb[rx_start_idx++];
11826                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11827                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11828
11829                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11830                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11831                         goto out;
11832
11833                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11834                          - ETH_FCS_LEN;
11835
11836                 if (!tso_loopback) {
11837                         if (rx_len != tx_len)
11838                                 goto out;
11839
11840                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11841                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11842                                         goto out;
11843                         } else {
11844                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11845                                         goto out;
11846                         }
11847                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11848                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11849                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11850                         goto out;
11851                 }
11852
11853                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11854                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11855                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11856                                              mapping);
11857                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11858                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11859                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11860                                              mapping);
11861                 } else
11862                         goto out;
11863
11864                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11865                                             PCI_DMA_FROMDEVICE);
11866
11867                 rx_data += TG3_RX_OFFSET(tp);
11868                 for (i = data_off; i < rx_len; i++, val++) {
11869                         if (*(rx_data + i) != (u8) (val & 0xff))
11870                                 goto out;
11871                 }
11872         }
11873
11874         err = 0;
11875
11876         /* tg3_free_rings will unmap and free the rx_data */
11877 out:
11878         return err;
11879 }
11880
11881 #define TG3_STD_LOOPBACK_FAILED         1
11882 #define TG3_JMB_LOOPBACK_FAILED         2
11883 #define TG3_TSO_LOOPBACK_FAILED         4
11884 #define TG3_LOOPBACK_FAILED \
11885         (TG3_STD_LOOPBACK_FAILED | \
11886          TG3_JMB_LOOPBACK_FAILED | \
11887          TG3_TSO_LOOPBACK_FAILED)
11888
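/* Each element of data[] below is a bitmask of the failures seen in
 * one loopback mode: data[0] internal MAC loopback, data[1] internal
 * PHY loopback, data[2] external PHY loopback.
 */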
11889 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11890 {
11891         int err = -EIO;
11892         u32 eee_cap;
11893         u32 jmb_pkt_sz = 9000;
11894
11895         if (tp->dma_limit)
11896                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11897
11898         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11899         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11900
11901         if (!netif_running(tp->dev)) {
11902                 data[0] = TG3_LOOPBACK_FAILED;
11903                 data[1] = TG3_LOOPBACK_FAILED;
11904                 if (do_extlpbk)
11905                         data[2] = TG3_LOOPBACK_FAILED;
11906                 goto done;
11907         }
11908
11909         err = tg3_reset_hw(tp, 1);
11910         if (err) {
11911                 data[0] = TG3_LOOPBACK_FAILED;
11912                 data[1] = TG3_LOOPBACK_FAILED;
11913                 if (do_extlpbk)
11914                         data[2] = TG3_LOOPBACK_FAILED;
11915                 goto done;
11916         }
11917
11918         if (tg3_flag(tp, ENABLE_RSS)) {
11919                 int i;
11920
11921                 /* Reroute all rx packets to the 1st queue */
11922                 for (i = MAC_RSS_INDIR_TBL_0;
11923                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11924                         tw32(i, 0x0);
11925         }
11926
11927         /* HW errata - mac loopback fails in some cases on 5780.
11928          * Normal traffic and PHY loopback are not affected by
11929          * this erratum.  Also, the MAC loopback test is deprecated for
11930          * all newer ASIC revisions.
11931          */
11932         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11933             !tg3_flag(tp, CPMU_PRESENT)) {
11934                 tg3_mac_loopback(tp, true);
11935
11936                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11937                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11938
11939                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11940                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11941                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11942
11943                 tg3_mac_loopback(tp, false);
11944         }
11945
11946         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11947             !tg3_flag(tp, USE_PHYLIB)) {
11948                 int i;
11949
11950                 tg3_phy_lpbk_set(tp, 0, false);
11951
11952                 /* Wait for link */
11953                 for (i = 0; i < 100; i++) {
11954                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11955                                 break;
11956                         mdelay(1);
11957                 }
11958
11959                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11960                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11961                 if (tg3_flag(tp, TSO_CAPABLE) &&
11962                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11963                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11964                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11965                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11966                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11967
11968                 if (do_extlpbk) {
11969                         tg3_phy_lpbk_set(tp, 0, true);
11970
11971                         /* All link indications report up, but the hardware
11972                          * isn't really ready for about 20 msec.  Double it
11973                          * to be sure.
11974                          */
11975                         mdelay(40);
11976
11977                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11978                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11979                         if (tg3_flag(tp, TSO_CAPABLE) &&
11980                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11981                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11982                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11983                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11984                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11985                 }
11986
11987                 /* Re-enable gphy autopowerdown. */
11988                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11989                         tg3_phy_toggle_apd(tp, true);
11990         }
11991
11992         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11993
11994 done:
11995         tp->phy_flags |= eee_cap;
11996
11997         return err;
11998 }
11999
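/* ethtool self-test entry point.  Result slots: data[0] nvram,
 * data[1] link, data[2] registers, data[3] memory, data[4]-data[6]
 * the three loopback modes, and data[7] the interrupt test.
 */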
12000 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12001                           u64 *data)
12002 {
12003         struct tg3 *tp = netdev_priv(dev);
12004         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12005
12006         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12007             tg3_power_up(tp)) {
12008                 etest->flags |= ETH_TEST_FL_FAILED;
12009                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12010                 return;
12011         }
12012
12013         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12014
12015         if (tg3_test_nvram(tp) != 0) {
12016                 etest->flags |= ETH_TEST_FL_FAILED;
12017                 data[0] = 1;
12018         }
12019         if (!doextlpbk && tg3_test_link(tp)) {
12020                 etest->flags |= ETH_TEST_FL_FAILED;
12021                 data[1] = 1;
12022         }
12023         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12024                 int err, err2 = 0, irq_sync = 0;
12025
12026                 if (netif_running(dev)) {
12027                         tg3_phy_stop(tp);
12028                         tg3_netif_stop(tp);
12029                         irq_sync = 1;
12030                 }
12031
12032                 tg3_full_lock(tp, irq_sync);
12033
12034                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12035                 err = tg3_nvram_lock(tp);
12036                 tg3_halt_cpu(tp, RX_CPU_BASE);
12037                 if (!tg3_flag(tp, 5705_PLUS))
12038                         tg3_halt_cpu(tp, TX_CPU_BASE);
12039                 if (!err)
12040                         tg3_nvram_unlock(tp);
12041
12042                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12043                         tg3_phy_reset(tp);
12044
12045                 if (tg3_test_registers(tp) != 0) {
12046                         etest->flags |= ETH_TEST_FL_FAILED;
12047                         data[2] = 1;
12048                 }
12049
12050                 if (tg3_test_memory(tp) != 0) {
12051                         etest->flags |= ETH_TEST_FL_FAILED;
12052                         data[3] = 1;
12053                 }
12054
12055                 if (doextlpbk)
12056                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12057
12058                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12059                         etest->flags |= ETH_TEST_FL_FAILED;
12060
12061                 tg3_full_unlock(tp);
12062
12063                 if (tg3_test_interrupt(tp) != 0) {
12064                         etest->flags |= ETH_TEST_FL_FAILED;
12065                         data[7] = 1;
12066                 }
12067
12068                 tg3_full_lock(tp, 0);
12069
12070                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12071                 if (netif_running(dev)) {
12072                         tg3_flag_set(tp, INIT_COMPLETE);
12073                         err2 = tg3_restart_hw(tp, 1);
12074                         if (!err2)
12075                                 tg3_netif_start(tp);
12076                 }
12077
12078                 tg3_full_unlock(tp);
12079
12080                 if (irq_sync && !err2)
12081                         tg3_phy_start(tp);
12082         }
12083         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12084                 tg3_power_down(tp);
12085
12086 }
12087
12088 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12089 {
12090         struct mii_ioctl_data *data = if_mii(ifr);
12091         struct tg3 *tp = netdev_priv(dev);
12092         int err;
12093
12094         if (tg3_flag(tp, USE_PHYLIB)) {
12095                 struct phy_device *phydev;
12096                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12097                         return -EAGAIN;
12098                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12099                 return phy_mii_ioctl(phydev, ifr, cmd);
12100         }
12101
12102         switch (cmd) {
12103         case SIOCGMIIPHY:
12104                 data->phy_id = tp->phy_addr;
12105
12106                 /* fall through */
12107         case SIOCGMIIREG: {
12108                 u32 mii_regval;
12109
12110                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12111                         break;                  /* We have no PHY */
12112
12113                 if (!netif_running(dev))
12114                         return -EAGAIN;
12115
12116                 spin_lock_bh(&tp->lock);
12117                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12118                 spin_unlock_bh(&tp->lock);
12119
12120                 data->val_out = mii_regval;
12121
12122                 return err;
12123         }
12124
12125         case SIOCSMIIREG:
12126                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12127                         break;                  /* We have no PHY */
12128
12129                 if (!netif_running(dev))
12130                         return -EAGAIN;
12131
12132                 spin_lock_bh(&tp->lock);
12133                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12134                 spin_unlock_bh(&tp->lock);
12135
12136                 return err;
12137
12138         default:
12139                 /* do nothing */
12140                 break;
12141         }
12142         return -EOPNOTSUPP;
12143 }
12144
12145 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12146 {
12147         struct tg3 *tp = netdev_priv(dev);
12148
12149         memcpy(ec, &tp->coal, sizeof(*ec));
12150         return 0;
12151 }
12152
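/* Note that on 5705 and newer chips the per-interrupt tick limits and
 * the stats-block interval limit below stay zero, so any nonzero
 * request for those parameters is rejected with -EINVAL.
 */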
12153 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12154 {
12155         struct tg3 *tp = netdev_priv(dev);
12156         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12157         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12158
12159         if (!tg3_flag(tp, 5705_PLUS)) {
12160                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12161                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12162                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12163                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12164         }
12165
12166         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12167             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12168             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12169             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12170             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12171             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12172             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12173             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12174             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12175             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12176                 return -EINVAL;
12177
12178         /* No rx interrupts will be generated if both are zero */
12179         if ((ec->rx_coalesce_usecs == 0) &&
12180             (ec->rx_max_coalesced_frames == 0))
12181                 return -EINVAL;
12182
12183         /* No tx interrupts will be generated if both are zero */
12184         if ((ec->tx_coalesce_usecs == 0) &&
12185             (ec->tx_max_coalesced_frames == 0))
12186                 return -EINVAL;
12187
12188         /* Only copy relevant parameters, ignore all others. */
12189         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12190         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12191         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12192         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12193         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12194         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12195         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12196         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12197         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12198
12199         if (netif_running(dev)) {
12200                 tg3_full_lock(tp, 0);
12201                 __tg3_set_coalesce(tp, &tp->coal);
12202                 tg3_full_unlock(tp);
12203         }
12204         return 0;
12205 }
12206
12207 static const struct ethtool_ops tg3_ethtool_ops = {
12208         .get_settings           = tg3_get_settings,
12209         .set_settings           = tg3_set_settings,
12210         .get_drvinfo            = tg3_get_drvinfo,
12211         .get_regs_len           = tg3_get_regs_len,
12212         .get_regs               = tg3_get_regs,
12213         .get_wol                = tg3_get_wol,
12214         .set_wol                = tg3_set_wol,
12215         .get_msglevel           = tg3_get_msglevel,
12216         .set_msglevel           = tg3_set_msglevel,
12217         .nway_reset             = tg3_nway_reset,
12218         .get_link               = ethtool_op_get_link,
12219         .get_eeprom_len         = tg3_get_eeprom_len,
12220         .get_eeprom             = tg3_get_eeprom,
12221         .set_eeprom             = tg3_set_eeprom,
12222         .get_ringparam          = tg3_get_ringparam,
12223         .set_ringparam          = tg3_set_ringparam,
12224         .get_pauseparam         = tg3_get_pauseparam,
12225         .set_pauseparam         = tg3_set_pauseparam,
12226         .self_test              = tg3_self_test,
12227         .get_strings            = tg3_get_strings,
12228         .set_phys_id            = tg3_set_phys_id,
12229         .get_ethtool_stats      = tg3_get_ethtool_stats,
12230         .get_coalesce           = tg3_get_coalesce,
12231         .set_coalesce           = tg3_set_coalesce,
12232         .get_sset_count         = tg3_get_sset_count,
12233         .get_rxnfc              = tg3_get_rxnfc,
12234         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12235         .get_rxfh_indir         = tg3_get_rxfh_indir,
12236         .set_rxfh_indir         = tg3_set_rxfh_indir,
12237         .get_ts_info            = ethtool_op_get_ts_info,
12238 };
12239
12240 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12241                                                 struct rtnl_link_stats64 *stats)
12242 {
12243         struct tg3 *tp = netdev_priv(dev);
12244
12245         if (!tp->hw_stats)
12246                 return &tp->net_stats_prev;
12247
12248         spin_lock_bh(&tp->lock);
12249         tg3_get_nstats(tp, stats);
12250         spin_unlock_bh(&tp->lock);
12251
12252         return stats;
12253 }
12254
12255 static void tg3_set_rx_mode(struct net_device *dev)
12256 {
12257         struct tg3 *tp = netdev_priv(dev);
12258
12259         if (!netif_running(dev))
12260                 return;
12261
12262         tg3_full_lock(tp, 0);
12263         __tg3_set_rx_mode(dev);
12264         tg3_full_unlock(tp);
12265 }
12266
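/* On 5780-class chips TSO and jumbo MTUs are mutually exclusive, so
 * TSO capability is dropped when the MTU exceeds ETH_DATA_LEN and
 * restored when it shrinks again; other chips simply toggle the
 * dedicated jumbo ring.
 */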
12267 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12268                                int new_mtu)
12269 {
12270         dev->mtu = new_mtu;
12271
12272         if (new_mtu > ETH_DATA_LEN) {
12273                 if (tg3_flag(tp, 5780_CLASS)) {
12274                         netdev_update_features(dev);
12275                         tg3_flag_clear(tp, TSO_CAPABLE);
12276                 } else {
12277                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12278                 }
12279         } else {
12280                 if (tg3_flag(tp, 5780_CLASS)) {
12281                         tg3_flag_set(tp, TSO_CAPABLE);
12282                         netdev_update_features(dev);
12283                 }
12284                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12285         }
12286 }
12287
12288 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12289 {
12290         struct tg3 *tp = netdev_priv(dev);
12291         int err, reset_phy = 0;
12292
12293         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12294                 return -EINVAL;
12295
12296         if (!netif_running(dev)) {
12297                 /* We'll just catch it later when the
12298                  * device is brought up.
12299                  */
12300                 tg3_set_mtu(dev, tp, new_mtu);
12301                 return 0;
12302         }
12303
12304         tg3_phy_stop(tp);
12305
12306         tg3_netif_stop(tp);
12307
12308         tg3_full_lock(tp, 1);
12309
12310         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12311
12312         tg3_set_mtu(dev, tp, new_mtu);
12313
12314         /* Reset PHY, otherwise the read DMA engine will be in a mode that
12315          * breaks all requests to 256 bytes.
12316          */
12317         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12318                 reset_phy = 1;
12319
12320         err = tg3_restart_hw(tp, reset_phy);
12321
12322         if (!err)
12323                 tg3_netif_start(tp);
12324
12325         tg3_full_unlock(tp);
12326
12327         if (!err)
12328                 tg3_phy_start(tp);
12329
12330         return err;
12331 }
12332
12333 static const struct net_device_ops tg3_netdev_ops = {
12334         .ndo_open               = tg3_open,
12335         .ndo_stop               = tg3_close,
12336         .ndo_start_xmit         = tg3_start_xmit,
12337         .ndo_get_stats64        = tg3_get_stats64,
12338         .ndo_validate_addr      = eth_validate_addr,
12339         .ndo_set_rx_mode        = tg3_set_rx_mode,
12340         .ndo_set_mac_address    = tg3_set_mac_addr,
12341         .ndo_do_ioctl           = tg3_ioctl,
12342         .ndo_tx_timeout         = tg3_tx_timeout,
12343         .ndo_change_mtu         = tg3_change_mtu,
12344         .ndo_fix_features       = tg3_fix_features,
12345         .ndo_set_features       = tg3_set_features,
12346 #ifdef CONFIG_NET_POLL_CONTROLLER
12347         .ndo_poll_controller    = tg3_poll_controller,
12348 #endif
12349 };
12350
12351 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12352 {
12353         u32 cursize, val, magic;
12354
12355         tp->nvram_size = EEPROM_CHIP_SIZE;
12356
12357         if (tg3_nvram_read(tp, 0, &magic) != 0)
12358                 return;
12359
12360         if ((magic != TG3_EEPROM_MAGIC) &&
12361             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12362             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12363                 return;
12364
12365         /*
12366          * Size the chip by reading offsets at increasing powers of two.
12367          * When we encounter our validation signature, we know the addressing
12368          * has wrapped around, and thus have our chip size.
12369          */
12370         cursize = 0x10;
12371
12372         while (cursize < tp->nvram_size) {
12373                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12374                         return;
12375
12376                 if (val == magic)
12377                         break;
12378
12379                 cursize <<= 1;
12380         }
12381
12382         tp->nvram_size = cursize;
12383 }
12384
12385 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12386 {
12387         u32 val;
12388
12389         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12390                 return;
12391
12392         /* Selfboot format */
12393         if (val != TG3_EEPROM_MAGIC) {
12394                 tg3_get_eeprom_size(tp);
12395                 return;
12396         }
12397
12398         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12399                 if (val != 0) {
12400                         /* This is confusing.  We want to operate on the
12401                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12402                          * call will read from NVRAM and byteswap the data
12403                          * according to the byteswapping settings for all
12404                          * other register accesses.  This ensures the data we
12405                          * want will always reside in the lower 16-bits.
12406                          * However, the data in NVRAM is in LE format, which
12407                          * means the data from the NVRAM read will always be
12408                          * opposite the endianness of the CPU.  The 16-bit
12409                          * byteswap then brings the data to CPU endianness.
12410                          */
12411                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12412                         return;
12413                 }
12414         }
12415         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12416 }
12417
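/* Decode NVRAM_CFG1 into a JEDEC vendor id, page size and the
 * buffered/flash flags.  Only 5750 and 5780-class parts carry the
 * vendor field here; everything else is assumed to be buffered
 * Atmel flash.
 */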
12418 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12419 {
12420         u32 nvcfg1;
12421
12422         nvcfg1 = tr32(NVRAM_CFG1);
12423         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12424                 tg3_flag_set(tp, FLASH);
12425         } else {
12426                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12427                 tw32(NVRAM_CFG1, nvcfg1);
12428         }
12429
12430         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12431             tg3_flag(tp, 5780_CLASS)) {
12432                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12433                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12434                         tp->nvram_jedecnum = JEDEC_ATMEL;
12435                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12436                         tg3_flag_set(tp, NVRAM_BUFFERED);
12437                         break;
12438                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12439                         tp->nvram_jedecnum = JEDEC_ATMEL;
12440                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12441                         break;
12442                 case FLASH_VENDOR_ATMEL_EEPROM:
12443                         tp->nvram_jedecnum = JEDEC_ATMEL;
12444                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12445                         tg3_flag_set(tp, NVRAM_BUFFERED);
12446                         break;
12447                 case FLASH_VENDOR_ST:
12448                         tp->nvram_jedecnum = JEDEC_ST;
12449                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12450                         tg3_flag_set(tp, NVRAM_BUFFERED);
12451                         break;
12452                 case FLASH_VENDOR_SAIFUN:
12453                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12454                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12455                         break;
12456                 case FLASH_VENDOR_SST_SMALL:
12457                 case FLASH_VENDOR_SST_LARGE:
12458                         tp->nvram_jedecnum = JEDEC_SST;
12459                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12460                         break;
12461                 }
12462         } else {
12463                 tp->nvram_jedecnum = JEDEC_ATMEL;
12464                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12465                 tg3_flag_set(tp, NVRAM_BUFFERED);
12466         }
12467 }
12468
12469 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12470 {
12471         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12472         case FLASH_5752PAGE_SIZE_256:
12473                 tp->nvram_pagesize = 256;
12474                 break;
12475         case FLASH_5752PAGE_SIZE_512:
12476                 tp->nvram_pagesize = 512;
12477                 break;
12478         case FLASH_5752PAGE_SIZE_1K:
12479                 tp->nvram_pagesize = 1024;
12480                 break;
12481         case FLASH_5752PAGE_SIZE_2K:
12482                 tp->nvram_pagesize = 2048;
12483                 break;
12484         case FLASH_5752PAGE_SIZE_4K:
12485                 tp->nvram_pagesize = 4096;
12486                 break;
12487         case FLASH_5752PAGE_SIZE_264:
12488                 tp->nvram_pagesize = 264;
12489                 break;
12490         case FLASH_5752PAGE_SIZE_528:
12491                 tp->nvram_pagesize = 528;
12492                 break;
12493         }
12494 }
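
/* The 264- and 528-byte page sizes appear to match Atmel AT45DB-style
 * DataFlash parts, which use a power-of-two-plus-8 page geometry.
 */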
12495
12496 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12497 {
12498         u32 nvcfg1;
12499
12500         nvcfg1 = tr32(NVRAM_CFG1);
12501
12502         /* NVRAM protection for TPM */
12503         if (nvcfg1 & (1 << 27))
12504                 tg3_flag_set(tp, PROTECTED_NVRAM);
12505
12506         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12507         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12508         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12509                 tp->nvram_jedecnum = JEDEC_ATMEL;
12510                 tg3_flag_set(tp, NVRAM_BUFFERED);
12511                 break;
12512         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12513                 tp->nvram_jedecnum = JEDEC_ATMEL;
12514                 tg3_flag_set(tp, NVRAM_BUFFERED);
12515                 tg3_flag_set(tp, FLASH);
12516                 break;
12517         case FLASH_5752VENDOR_ST_M45PE10:
12518         case FLASH_5752VENDOR_ST_M45PE20:
12519         case FLASH_5752VENDOR_ST_M45PE40:
12520                 tp->nvram_jedecnum = JEDEC_ST;
12521                 tg3_flag_set(tp, NVRAM_BUFFERED);
12522                 tg3_flag_set(tp, FLASH);
12523                 break;
12524         }
12525
12526         if (tg3_flag(tp, FLASH)) {
12527                 tg3_nvram_get_pagesize(tp, nvcfg1);
12528         } else {
12529                 /* For eeprom, set pagesize to maximum eeprom size */
12530                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12531
12532                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12533                 tw32(NVRAM_CFG1, nvcfg1);
12534         }
12535 }
12536
12537 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12538 {
12539         u32 nvcfg1, protect = 0;
12540
12541         nvcfg1 = tr32(NVRAM_CFG1);
12542
12543         /* NVRAM protection for TPM */
12544         if (nvcfg1 & (1 << 27)) {
12545                 tg3_flag_set(tp, PROTECTED_NVRAM);
12546                 protect = 1;
12547         }
12548
12549         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12550         switch (nvcfg1) {
12551         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12552         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12553         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12554         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12555                 tp->nvram_jedecnum = JEDEC_ATMEL;
12556                 tg3_flag_set(tp, NVRAM_BUFFERED);
12557                 tg3_flag_set(tp, FLASH);
12558                 tp->nvram_pagesize = 264;
12559                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12560                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12561                         tp->nvram_size = (protect ? 0x3e200 :
12562                                           TG3_NVRAM_SIZE_512KB);
12563                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12564                         tp->nvram_size = (protect ? 0x1f200 :
12565                                           TG3_NVRAM_SIZE_256KB);
12566                 else
12567                         tp->nvram_size = (protect ? 0x1f200 :
12568                                           TG3_NVRAM_SIZE_128KB);
12569                 break;
12570         case FLASH_5752VENDOR_ST_M45PE10:
12571         case FLASH_5752VENDOR_ST_M45PE20:
12572         case FLASH_5752VENDOR_ST_M45PE40:
12573                 tp->nvram_jedecnum = JEDEC_ST;
12574                 tg3_flag_set(tp, NVRAM_BUFFERED);
12575                 tg3_flag_set(tp, FLASH);
12576                 tp->nvram_pagesize = 256;
12577                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12578                         tp->nvram_size = (protect ?
12579                                           TG3_NVRAM_SIZE_64KB :
12580                                           TG3_NVRAM_SIZE_128KB);
12581                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12582                         tp->nvram_size = (protect ?
12583                                           TG3_NVRAM_SIZE_64KB :
12584                                           TG3_NVRAM_SIZE_256KB);
12585                 else
12586                         tp->nvram_size = (protect ?
12587                                           TG3_NVRAM_SIZE_128KB :
12588                                           TG3_NVRAM_SIZE_512KB);
12589                 break;
12590         }
12591 }
12592
12593 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12594 {
12595         u32 nvcfg1;
12596
12597         nvcfg1 = tr32(NVRAM_CFG1);
12598
12599         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12600         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12601         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12602         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12603         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12604                 tp->nvram_jedecnum = JEDEC_ATMEL;
12605                 tg3_flag_set(tp, NVRAM_BUFFERED);
12606                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12607
12608                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12609                 tw32(NVRAM_CFG1, nvcfg1);
12610                 break;
12611         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12612         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12613         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12614         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12615                 tp->nvram_jedecnum = JEDEC_ATMEL;
12616                 tg3_flag_set(tp, NVRAM_BUFFERED);
12617                 tg3_flag_set(tp, FLASH);
12618                 tp->nvram_pagesize = 264;
12619                 break;
12620         case FLASH_5752VENDOR_ST_M45PE10:
12621         case FLASH_5752VENDOR_ST_M45PE20:
12622         case FLASH_5752VENDOR_ST_M45PE40:
12623                 tp->nvram_jedecnum = JEDEC_ST;
12624                 tg3_flag_set(tp, NVRAM_BUFFERED);
12625                 tg3_flag_set(tp, FLASH);
12626                 tp->nvram_pagesize = 256;
12627                 break;
12628         }
12629 }
12630
12631 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12632 {
12633         u32 nvcfg1, protect = 0;
12634
12635         nvcfg1 = tr32(NVRAM_CFG1);
12636
12637         /* NVRAM protection for TPM */
12638         if (nvcfg1 & (1 << 27)) {
12639                 tg3_flag_set(tp, PROTECTED_NVRAM);
12640                 protect = 1;
12641         }
12642
12643         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12644         switch (nvcfg1) {
12645         case FLASH_5761VENDOR_ATMEL_ADB021D:
12646         case FLASH_5761VENDOR_ATMEL_ADB041D:
12647         case FLASH_5761VENDOR_ATMEL_ADB081D:
12648         case FLASH_5761VENDOR_ATMEL_ADB161D:
12649         case FLASH_5761VENDOR_ATMEL_MDB021D:
12650         case FLASH_5761VENDOR_ATMEL_MDB041D:
12651         case FLASH_5761VENDOR_ATMEL_MDB081D:
12652         case FLASH_5761VENDOR_ATMEL_MDB161D:
12653                 tp->nvram_jedecnum = JEDEC_ATMEL;
12654                 tg3_flag_set(tp, NVRAM_BUFFERED);
12655                 tg3_flag_set(tp, FLASH);
12656                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12657                 tp->nvram_pagesize = 256;
12658                 break;
12659         case FLASH_5761VENDOR_ST_A_M45PE20:
12660         case FLASH_5761VENDOR_ST_A_M45PE40:
12661         case FLASH_5761VENDOR_ST_A_M45PE80:
12662         case FLASH_5761VENDOR_ST_A_M45PE16:
12663         case FLASH_5761VENDOR_ST_M_M45PE20:
12664         case FLASH_5761VENDOR_ST_M_M45PE40:
12665         case FLASH_5761VENDOR_ST_M_M45PE80:
12666         case FLASH_5761VENDOR_ST_M_M45PE16:
12667                 tp->nvram_jedecnum = JEDEC_ST;
12668                 tg3_flag_set(tp, NVRAM_BUFFERED);
12669                 tg3_flag_set(tp, FLASH);
12670                 tp->nvram_pagesize = 256;
12671                 break;
12672         }
12673
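        /* With TPM protection active, only the region below the lockout
         * address is accessible, so report that as the NVRAM size rather
         * than the part's nominal capacity.
         */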
12674         if (protect) {
12675                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12676         } else {
12677                 switch (nvcfg1) {
12678                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12679                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12680                 case FLASH_5761VENDOR_ST_A_M45PE16:
12681                 case FLASH_5761VENDOR_ST_M_M45PE16:
12682                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12683                         break;
12684                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12685                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12686                 case FLASH_5761VENDOR_ST_A_M45PE80:
12687                 case FLASH_5761VENDOR_ST_M_M45PE80:
12688                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12689                         break;
12690                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12691                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12692                 case FLASH_5761VENDOR_ST_A_M45PE40:
12693                 case FLASH_5761VENDOR_ST_M_M45PE40:
12694                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12695                         break;
12696                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12697                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12698                 case FLASH_5761VENDOR_ST_A_M45PE20:
12699                 case FLASH_5761VENDOR_ST_M_M45PE20:
12700                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12701                         break;
12702                 }
12703         }
12704 }
12705
12706 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12707 {
12708         tp->nvram_jedecnum = JEDEC_ATMEL;
12709         tg3_flag_set(tp, NVRAM_BUFFERED);
12710         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12711 }
12712
12713 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12714 {
12715         u32 nvcfg1;
12716
12717         nvcfg1 = tr32(NVRAM_CFG1);
12718
12719         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12720         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12721         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12722                 tp->nvram_jedecnum = JEDEC_ATMEL;
12723                 tg3_flag_set(tp, NVRAM_BUFFERED);
12724                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12725
12726                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12727                 tw32(NVRAM_CFG1, nvcfg1);
12728                 return;
12729         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12730         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12731         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12732         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12733         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12734         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12735         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12736                 tp->nvram_jedecnum = JEDEC_ATMEL;
12737                 tg3_flag_set(tp, NVRAM_BUFFERED);
12738                 tg3_flag_set(tp, FLASH);
12739
12740                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12741                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12742                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12743                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12744                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12745                         break;
12746                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12747                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12748                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12749                         break;
12750                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12751                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12752                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12753                         break;
12754                 }
12755                 break;
12756         case FLASH_5752VENDOR_ST_M45PE10:
12757         case FLASH_5752VENDOR_ST_M45PE20:
12758         case FLASH_5752VENDOR_ST_M45PE40:
12759                 tp->nvram_jedecnum = JEDEC_ST;
12760                 tg3_flag_set(tp, NVRAM_BUFFERED);
12761                 tg3_flag_set(tp, FLASH);
12762
12763                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12764                 case FLASH_5752VENDOR_ST_M45PE10:
12765                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12766                         break;
12767                 case FLASH_5752VENDOR_ST_M45PE20:
12768                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12769                         break;
12770                 case FLASH_5752VENDOR_ST_M45PE40:
12771                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12772                         break;
12773                 }
12774                 break;
12775         default:
12776                 tg3_flag_set(tp, NO_NVRAM);
12777                 return;
12778         }
12779
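        /* Only the odd-sized Atmel DataFlash pages (264/528 bytes) need
         * byte-to-page address translation; all other parts are addressed
         * linearly.
         */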
12780         tg3_nvram_get_pagesize(tp, nvcfg1);
12781         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12782                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12783 }
12784
12786 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12787 {
12788         u32 nvcfg1;
12789
12790         nvcfg1 = tr32(NVRAM_CFG1);
12791
12792         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12793         case FLASH_5717VENDOR_ATMEL_EEPROM:
12794         case FLASH_5717VENDOR_MICRO_EEPROM:
12795                 tp->nvram_jedecnum = JEDEC_ATMEL;
12796                 tg3_flag_set(tp, NVRAM_BUFFERED);
12797                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12798
12799                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12800                 tw32(NVRAM_CFG1, nvcfg1);
12801                 return;
12802         case FLASH_5717VENDOR_ATMEL_MDB011D:
12803         case FLASH_5717VENDOR_ATMEL_ADB011B:
12804         case FLASH_5717VENDOR_ATMEL_ADB011D:
12805         case FLASH_5717VENDOR_ATMEL_MDB021D:
12806         case FLASH_5717VENDOR_ATMEL_ADB021B:
12807         case FLASH_5717VENDOR_ATMEL_ADB021D:
12808         case FLASH_5717VENDOR_ATMEL_45USPT:
12809                 tp->nvram_jedecnum = JEDEC_ATMEL;
12810                 tg3_flag_set(tp, NVRAM_BUFFERED);
12811                 tg3_flag_set(tp, FLASH);
12812
12813                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12814                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12815                         /* Detect size with tg3_nvram_get_size() */
12816                         break;
12817                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12818                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12819                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12820                         break;
12821                 default:
12822                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12823                         break;
12824                 }
12825                 break;
12826         case FLASH_5717VENDOR_ST_M_M25PE10:
12827         case FLASH_5717VENDOR_ST_A_M25PE10:
12828         case FLASH_5717VENDOR_ST_M_M45PE10:
12829         case FLASH_5717VENDOR_ST_A_M45PE10:
12830         case FLASH_5717VENDOR_ST_M_M25PE20:
12831         case FLASH_5717VENDOR_ST_A_M25PE20:
12832         case FLASH_5717VENDOR_ST_M_M45PE20:
12833         case FLASH_5717VENDOR_ST_A_M45PE20:
12834         case FLASH_5717VENDOR_ST_25USPT:
12835         case FLASH_5717VENDOR_ST_45USPT:
12836                 tp->nvram_jedecnum = JEDEC_ST;
12837                 tg3_flag_set(tp, NVRAM_BUFFERED);
12838                 tg3_flag_set(tp, FLASH);
12839
12840                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12841                 case FLASH_5717VENDOR_ST_M_M25PE20:
12842                 case FLASH_5717VENDOR_ST_M_M45PE20:
12843                         /* Detect size with tg3_nvram_get_size() */
12844                         break;
12845                 case FLASH_5717VENDOR_ST_A_M25PE20:
12846                 case FLASH_5717VENDOR_ST_A_M45PE20:
12847                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12848                         break;
12849                 default:
12850                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12851                         break;
12852                 }
12853                 break;
12854         default:
12855                 tg3_flag_set(tp, NO_NVRAM);
12856                 return;
12857         }
12858
12859         tg3_nvram_get_pagesize(tp, nvcfg1);
12860         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12861                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12862 }
12863
12864 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12865 {
12866         u32 nvcfg1, nvmpinstrp;
12867
12868         nvcfg1 = tr32(NVRAM_CFG1);
12869         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12870
12871         switch (nvmpinstrp) {
12872         case FLASH_5720_EEPROM_HD:
12873         case FLASH_5720_EEPROM_LD:
12874                 tp->nvram_jedecnum = JEDEC_ATMEL;
12875                 tg3_flag_set(tp, NVRAM_BUFFERED);
12876
12877                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12878                 tw32(NVRAM_CFG1, nvcfg1);
12879                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12880                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12881                 else
12882                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12883                 return;
12884         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12885         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12886         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12887         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12888         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12889         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12890         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12891         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12892         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12893         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12894         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12895         case FLASH_5720VENDOR_ATMEL_45USPT:
12896                 tp->nvram_jedecnum = JEDEC_ATMEL;
12897                 tg3_flag_set(tp, NVRAM_BUFFERED);
12898                 tg3_flag_set(tp, FLASH);
12899
12900                 switch (nvmpinstrp) {
12901                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12902                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12903                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12904                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12905                         break;
12906                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12907                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12908                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12909                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12910                         break;
12911                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12912                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12913                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12914                         break;
12915                 default:
12916                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12917                         break;
12918                 }
12919                 break;
12920         case FLASH_5720VENDOR_M_ST_M25PE10:
12921         case FLASH_5720VENDOR_M_ST_M45PE10:
12922         case FLASH_5720VENDOR_A_ST_M25PE10:
12923         case FLASH_5720VENDOR_A_ST_M45PE10:
12924         case FLASH_5720VENDOR_M_ST_M25PE20:
12925         case FLASH_5720VENDOR_M_ST_M45PE20:
12926         case FLASH_5720VENDOR_A_ST_M25PE20:
12927         case FLASH_5720VENDOR_A_ST_M45PE20:
12928         case FLASH_5720VENDOR_M_ST_M25PE40:
12929         case FLASH_5720VENDOR_M_ST_M45PE40:
12930         case FLASH_5720VENDOR_A_ST_M25PE40:
12931         case FLASH_5720VENDOR_A_ST_M45PE40:
12932         case FLASH_5720VENDOR_M_ST_M25PE80:
12933         case FLASH_5720VENDOR_M_ST_M45PE80:
12934         case FLASH_5720VENDOR_A_ST_M25PE80:
12935         case FLASH_5720VENDOR_A_ST_M45PE80:
12936         case FLASH_5720VENDOR_ST_25USPT:
12937         case FLASH_5720VENDOR_ST_45USPT:
12938                 tp->nvram_jedecnum = JEDEC_ST;
12939                 tg3_flag_set(tp, NVRAM_BUFFERED);
12940                 tg3_flag_set(tp, FLASH);
12941
12942                 switch (nvmpinstrp) {
12943                 case FLASH_5720VENDOR_M_ST_M25PE20:
12944                 case FLASH_5720VENDOR_M_ST_M45PE20:
12945                 case FLASH_5720VENDOR_A_ST_M25PE20:
12946                 case FLASH_5720VENDOR_A_ST_M45PE20:
12947                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12948                         break;
12949                 case FLASH_5720VENDOR_M_ST_M25PE40:
12950                 case FLASH_5720VENDOR_M_ST_M45PE40:
12951                 case FLASH_5720VENDOR_A_ST_M25PE40:
12952                 case FLASH_5720VENDOR_A_ST_M45PE40:
12953                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12954                         break;
12955                 case FLASH_5720VENDOR_M_ST_M25PE80:
12956                 case FLASH_5720VENDOR_M_ST_M45PE80:
12957                 case FLASH_5720VENDOR_A_ST_M25PE80:
12958                 case FLASH_5720VENDOR_A_ST_M45PE80:
12959                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12960                         break;
12961                 default:
12962                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12963                         break;
12964                 }
12965                 break;
12966         default:
12967                 tg3_flag_set(tp, NO_NVRAM);
12968                 return;
12969         }
12970
12971         tg3_nvram_get_pagesize(tp, nvcfg1);
12972         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12973                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12974 }
12975
12976 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12977 static void __devinit tg3_nvram_init(struct tg3 *tp)
12978 {
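        /* Reset the EEPROM access state machine and program the default
         * serial clock period before touching the interface.
         */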
12979         tw32_f(GRC_EEPROM_ADDR,
12980              (EEPROM_ADDR_FSM_RESET |
12981               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12982                EEPROM_ADDR_CLKPERD_SHIFT)));
12983
12984         msleep(1);
12985
12986         /* Enable seeprom accesses. */
12987         tw32_f(GRC_LOCAL_CTRL,
12988              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12989         udelay(100);
12990
12991         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12992             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12993                 tg3_flag_set(tp, NVRAM);
12994
12995                 if (tg3_nvram_lock(tp)) {
12996                         netdev_warn(tp->dev,
12997                                     "Cannot get nvram lock, %s failed\n",
12998                                     __func__);
12999                         return;
13000                 }
13001                 tg3_enable_nvram_access(tp);
13002
13003                 tp->nvram_size = 0;
13004
13005                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13006                         tg3_get_5752_nvram_info(tp);
13007                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13008                         tg3_get_5755_nvram_info(tp);
13009                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13010                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13011                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13012                         tg3_get_5787_nvram_info(tp);
13013                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13014                         tg3_get_5761_nvram_info(tp);
13015                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13016                         tg3_get_5906_nvram_info(tp);
13017                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13018                          tg3_flag(tp, 57765_CLASS))
13019                         tg3_get_57780_nvram_info(tp);
13020                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13021                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13022                         tg3_get_5717_nvram_info(tp);
13023                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13024                         tg3_get_5720_nvram_info(tp);
13025                 else
13026                         tg3_get_nvram_info(tp);
13027
13028                 if (tp->nvram_size == 0)
13029                         tg3_get_nvram_size(tp);
13030
13031                 tg3_disable_nvram_access(tp);
13032                 tg3_nvram_unlock(tp);
13033
13034         } else {
13035                 tg3_flag_clear(tp, NVRAM);
13036                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13037
13038                 tg3_get_eeprom_size(tp);
13039         }
13040 }
13041
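/* Fallback table mapping PCI subsystem IDs to PHY IDs, used by
 * tg3_phy_probe() when neither the MII registers nor the eeprom yield a
 * usable PHY ID.
 */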
13042 struct subsys_tbl_ent {
13043         u16 subsys_vendor, subsys_devid;
13044         u32 phy_id;
13045 };
13046
13047 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13048         /* Broadcom boards. */
13049         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13050           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13051         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13052           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13053         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13054           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13055         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13056           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13057         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13058           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13059         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13060           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13061         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13062           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13063         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13064           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13065         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13066           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13067         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13068           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13069         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13070           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13071
13072         /* 3com boards. */
13073         { TG3PCI_SUBVENDOR_ID_3COM,
13074           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13075         { TG3PCI_SUBVENDOR_ID_3COM,
13076           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13077         { TG3PCI_SUBVENDOR_ID_3COM,
13078           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13079         { TG3PCI_SUBVENDOR_ID_3COM,
13080           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13081         { TG3PCI_SUBVENDOR_ID_3COM,
13082           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13083
13084         /* DELL boards. */
13085         { TG3PCI_SUBVENDOR_ID_DELL,
13086           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13087         { TG3PCI_SUBVENDOR_ID_DELL,
13088           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13089         { TG3PCI_SUBVENDOR_ID_DELL,
13090           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13091         { TG3PCI_SUBVENDOR_ID_DELL,
13092           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13093
13094         /* Compaq boards. */
13095         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13096           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13097         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13098           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13099         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13100           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13101         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13102           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13103         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13104           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13105
13106         /* IBM boards. */
13107         { TG3PCI_SUBVENDOR_ID_IBM,
13108           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13109 };
13110
13111 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13112 {
13113         int i;
13114
13115         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13116                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13117                      tp->pdev->subsystem_vendor) &&
13118                     (subsys_id_to_phy_id[i].subsys_devid ==
13119                      tp->pdev->subsystem_device))
13120                         return &subsys_id_to_phy_id[i];
13121         }
13122         return NULL;
13123 }
13124
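/* Read the configuration the bootcode left in NIC SRAM (or, on the 5906,
 * in the VCPU config shadow register) and translate it into driver state:
 * PHY ID, LED mode, and the WOL/ASF/APE/RGMII feature flags.
 */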
13125 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13126 {
13127         u32 val;
13128
13129         tp->phy_id = TG3_PHY_ID_INVALID;
13130         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13131
13132         /* Assume the device is onboard and WOL-capable by default. */
13133         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13134         tg3_flag_set(tp, WOL_CAP);
13135
13136         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13137                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13138                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13139                         tg3_flag_set(tp, IS_NIC);
13140                 }
13141                 val = tr32(VCPU_CFGSHDW);
13142                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13143                         tg3_flag_set(tp, ASPM_WORKAROUND);
13144                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13145                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13146                         tg3_flag_set(tp, WOL_ENABLE);
13147                         device_set_wakeup_enable(&tp->pdev->dev, true);
13148                 }
13149                 goto done;
13150         }
13151
13152         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13153         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13154                 u32 nic_cfg, led_cfg;
13155                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13156                 int eeprom_phy_serdes = 0;
13157
13158                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13159                 tp->nic_sram_data_cfg = nic_cfg;
13160
13161                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13162                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13163                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13164                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13165                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13166                     (ver > 0) && (ver < 0x100))
13167                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13168
13169                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13170                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13171
13172                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13173                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13174                         eeprom_phy_serdes = 1;
13175
13176                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13177                 if (nic_phy_id != 0) {
13178                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13179                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13180
13181                         eeprom_phy_id  = (id1 >> 16) << 10;
13182                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13183                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13184                 } else
13185                         eeprom_phy_id = 0;
13186
13187                 tp->phy_id = eeprom_phy_id;
13188                 if (eeprom_phy_serdes) {
13189                         if (!tg3_flag(tp, 5705_PLUS))
13190                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13191                         else
13192                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13193                 }
13194
13195                 if (tg3_flag(tp, 5750_PLUS))
13196                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13197                                     SHASTA_EXT_LED_MODE_MASK);
13198                 else
13199                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13200
13201                 switch (led_cfg) {
13202                 default:
13203                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13204                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13205                         break;
13206
13207                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13208                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13209                         break;
13210
13211                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13212                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13213
13214                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
13215                          * as happens with some older 5700/5701
13216                          * bootcode.
                         */
13217                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13218                             ASIC_REV_5700 ||
13219                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13220                             ASIC_REV_5701)
13221                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13222
13223                         break;
13224
13225                 case SHASTA_EXT_LED_SHARED:
13226                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13227                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13228                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13229                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13230                                                  LED_CTRL_MODE_PHY_2);
13231                         break;
13232
13233                 case SHASTA_EXT_LED_MAC:
13234                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13235                         break;
13236
13237                 case SHASTA_EXT_LED_COMBO:
13238                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13239                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13240                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13241                                                  LED_CTRL_MODE_PHY_2);
13242                         break;
13243
13244                 }
13245
13246                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13247                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13248                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13249                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13250
13251                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13252                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13253
13254                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13255                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13256                         if ((tp->pdev->subsystem_vendor ==
13257                              PCI_VENDOR_ID_ARIMA) &&
13258                             (tp->pdev->subsystem_device == 0x205a ||
13259                              tp->pdev->subsystem_device == 0x2063))
13260                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13261                 } else {
13262                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13263                         tg3_flag_set(tp, IS_NIC);
13264                 }
13265
13266                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13267                         tg3_flag_set(tp, ENABLE_ASF);
13268                         if (tg3_flag(tp, 5750_PLUS))
13269                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13270                 }
13271
13272                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13273                     tg3_flag(tp, 5750_PLUS))
13274                         tg3_flag_set(tp, ENABLE_APE);
13275
13276                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13277                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13278                         tg3_flag_clear(tp, WOL_CAP);
13279
13280                 if (tg3_flag(tp, WOL_CAP) &&
13281                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13282                         tg3_flag_set(tp, WOL_ENABLE);
13283                         device_set_wakeup_enable(&tp->pdev->dev, true);
13284                 }
13285
13286                 if (cfg2 & (1 << 17))
13287                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13288
13289                 /* SerDes signal pre-emphasis in register 0x590 is set
13290                  * by the bootcode if bit 18 is set. */
13291                 if (cfg2 & (1 << 18))
13292                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13293
13294                 if ((tg3_flag(tp, 57765_PLUS) ||
13295                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13296                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13297                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13298                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13299
13300                 if (tg3_flag(tp, PCI_EXPRESS) &&
13301                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13302                     !tg3_flag(tp, 57765_PLUS)) {
13303                         u32 cfg3;
13304
13305                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13306                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13307                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13308                 }
13309
13310                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13311                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13312                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13313                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13314                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13315                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13316         }
13317 done:
13318         if (tg3_flag(tp, WOL_CAP))
13319                 device_set_wakeup_enable(&tp->pdev->dev,
13320                                          tg3_flag(tp, WOL_ENABLE));
13321         else
13322                 device_set_wakeup_capable(&tp->pdev->dev, false);
13323 }
13324
13325 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13326 {
13327         int i;
13328         u32 val;
13329
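        /* Write the command with the START bit set to kick it off, then
         * rewrite it with START clear.
         */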
13330         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13331         tw32(OTP_CTRL, cmd);
13332
13333         /* Wait for up to 1 ms for command to execute. */
13334         for (i = 0; i < 100; i++) {
13335                 val = tr32(OTP_STATUS);
13336                 if (val & OTP_STATUS_CMD_DONE)
13337                         break;
13338                 udelay(10);
13339         }
13340
13341         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13342 }
13343
13344 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13345  * configuration is a 32-bit value that straddles the alignment boundary.
13346  * We do two 32-bit reads and then shift and merge the results.
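 * For example, if the first read returns 0xAAAABBBB and the second
 * returns 0xCCCCDDDD, the merged value is
 * ((0xAAAABBBB & 0x0000ffff) << 16) | (0xCCCCDDDD >> 16) == 0xBBBBCCCC.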
13347  */
13348 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13349 {
13350         u32 bhalf_otp, thalf_otp;
13351
13352         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13353
13354         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13355                 return 0;
13356
13357         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13358
13359         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13360                 return 0;
13361
13362         thalf_otp = tr32(OTP_READ_DATA);
13363
13364         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13365
13366         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13367                 return 0;
13368
13369         bhalf_otp = tr32(OTP_READ_DATA);
13370
13371         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13372 }
13373
13374 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13375 {
13376         u32 adv = ADVERTISED_Autoneg;
13377
13378         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13379                 adv |= ADVERTISED_1000baseT_Half |
13380                        ADVERTISED_1000baseT_Full;
13381
13382         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13383                 adv |= ADVERTISED_100baseT_Half |
13384                        ADVERTISED_100baseT_Full |
13385                        ADVERTISED_10baseT_Half |
13386                        ADVERTISED_10baseT_Full |
13387                        ADVERTISED_TP;
13388         else
13389                 adv |= ADVERTISED_FIBRE;
13390
13391         tp->link_config.advertising = adv;
13392         tp->link_config.speed = SPEED_UNKNOWN;
13393         tp->link_config.duplex = DUPLEX_UNKNOWN;
13394         tp->link_config.autoneg = AUTONEG_ENABLE;
13395         tp->link_config.active_speed = SPEED_UNKNOWN;
13396         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13397
13398         tp->old_link = -1;
13399 }
13400
13401 static int __devinit tg3_phy_probe(struct tg3 *tp)
13402 {
13403         u32 hw_phy_id_1, hw_phy_id_2;
13404         u32 hw_phy_id, hw_phy_id_masked;
13405         int err;
13406
13407         /* flow control autonegotiation is default behavior */
13408         tg3_flag_set(tp, PAUSE_AUTONEG);
13409         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13410
13411         if (tg3_flag(tp, USE_PHYLIB))
13412                 return tg3_phy_init(tp);
13413
13414         /* Reading the PHY ID register can conflict with ASF
13415          * firmware access to the PHY hardware.
13416          */
13417         err = 0;
13418         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13419                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13420         } else {
13421                 /* Now read the physical PHY_ID from the chip and verify
13422                  * that it is sane.  If it doesn't look good, we fall
13423                  * back first to the PHY_ID found in the eeprom area,
13424                  * and failing that to the hard-coded subsystem device
13425                  * table.
                 */
13426                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13427                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13428
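                /* Pack the two MII PHYSID registers into the driver's
                 * internal PHY ID layout; this is the same packing used for
                 * the NIC_SRAM_DATA_PHY_ID words in tg3_get_eeprom_hw_cfg().
                 */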
13429                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13430                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13431                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13432
13433                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13434         }
13435
13436         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13437                 tp->phy_id = hw_phy_id;
13438                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13439                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13440                 else
13441                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13442         } else {
13443                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13444                         /* Do nothing, phy ID already set up in
13445                          * tg3_get_eeprom_hw_cfg().
13446                          */
13447                 } else {
13448                         struct subsys_tbl_ent *p;
13449
13450                         /* No eeprom signature?  Try the hardcoded
13451                          * subsys device table.
13452                          */
13453                         p = tg3_lookup_by_subsys(tp);
13454                         if (!p)
13455                                 return -ENODEV;
13456
13457                         tp->phy_id = p->phy_id;
13458                         if (!tp->phy_id ||
13459                             tp->phy_id == TG3_PHY_ID_BCM8002)
13460                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13461                 }
13462         }
13463
13464         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13465             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13466              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13467              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13468               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13469              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13470               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13471                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13472
13473         tg3_phy_init_link_config(tp);
13474
13475         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13476             !tg3_flag(tp, ENABLE_APE) &&
13477             !tg3_flag(tp, ENABLE_ASF)) {
13478                 u32 bmsr, dummy;
13479
13480                 tg3_readphy(tp, MII_BMSR, &bmsr);
13481                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13482                     (bmsr & BMSR_LSTATUS))
13483                         goto skip_phy_reset;
13484
13485                 err = tg3_phy_reset(tp);
13486                 if (err)
13487                         return err;
13488
13489                 tg3_phy_set_wirespeed(tp);
13490
13491                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13492                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13493                                             tp->link_config.flowctrl);
13494
13495                         tg3_writephy(tp, MII_BMCR,
13496                                      BMCR_ANENABLE | BMCR_ANRESTART);
13497                 }
13498         }
13499
13500 skip_phy_reset:
13501         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13502                 err = tg3_init_5401phy_dsp(tp);
13503                 if (err)
13504                         return err;
13505
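                /* The second call appears to be a deliberate retry; a
                 * single DSP load is not always sufficient on the 5401.
                 */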
13506                 err = tg3_init_5401phy_dsp(tp);
13507         }
13508
13509         return err;
13510 }
13511
13512 static void __devinit tg3_read_vpd(struct tg3 *tp)
13513 {
13514         u8 *vpd_data;
13515         unsigned int block_end, rosize, len;
13516         u32 vpdlen;
13517         int j, i = 0;
13518
13519         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13520         if (!vpd_data)
13521                 goto out_no_vpd;
13522
13523         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13524         if (i < 0)
13525                 goto out_not_found;
13526
13527         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13528         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13529         i += PCI_VPD_LRDT_TAG_SIZE;
13530
13531         if (block_end > vpdlen)
13532                 goto out_not_found;
13533
13534         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13535                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13536         if (j > 0) {
13537                 len = pci_vpd_info_field_size(&vpd_data[j]);
13538
13539                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13540                 if (j + len > block_end || len != 4 ||
13541                     memcmp(&vpd_data[j], "1028", 4))
13542                         goto partno;
13543
13544                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13545                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13546                 if (j < 0)
13547                         goto partno;
13548
13549                 len = pci_vpd_info_field_size(&vpd_data[j]);
13550
13551                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13552                 if (j + len > block_end || len >= TG3_VER_SIZE)
13553                         goto partno;
13554
13555                 memcpy(tp->fw_ver, &vpd_data[j], len);
13556                 strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
13557         }
13558
13559 partno:
13560         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13561                                       PCI_VPD_RO_KEYWORD_PARTNO);
13562         if (i < 0)
13563                 goto out_not_found;
13564
13565         len = pci_vpd_info_field_size(&vpd_data[i]);
13566
13567         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13568         if (len > TG3_BPN_SIZE ||
13569             (len + i) > vpdlen)
13570                 goto out_not_found;
13571
13572         memcpy(tp->board_part_number, &vpd_data[i], len);
13573
13574 out_not_found:
13575         kfree(vpd_data);
13576         if (tp->board_part_number[0])
13577                 return;
13578
13579 out_no_vpd:
13580         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13581                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13582                         strcpy(tp->board_part_number, "BCM5717");
13583                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13584                         strcpy(tp->board_part_number, "BCM5718");
13585                 else
13586                         goto nomatch;
13587         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13588                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13589                         strcpy(tp->board_part_number, "BCM57780");
13590                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13591                         strcpy(tp->board_part_number, "BCM57760");
13592                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13593                         strcpy(tp->board_part_number, "BCM57790");
13594                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13595                         strcpy(tp->board_part_number, "BCM57788");
13596                 else
13597                         goto nomatch;
13598         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13599                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13600                         strcpy(tp->board_part_number, "BCM57761");
13601                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13602                         strcpy(tp->board_part_number, "BCM57765");
13603                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13604                         strcpy(tp->board_part_number, "BCM57781");
13605                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13606                         strcpy(tp->board_part_number, "BCM57785");
13607                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13608                         strcpy(tp->board_part_number, "BCM57791");
13609                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13610                         strcpy(tp->board_part_number, "BCM57795");
13611                 else
13612                         goto nomatch;
13613         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13614                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13615                         strcpy(tp->board_part_number, "BCM57762");
13616                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13617                         strcpy(tp->board_part_number, "BCM57766");
13618                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13619                         strcpy(tp->board_part_number, "BCM57782");
13620                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13621                         strcpy(tp->board_part_number, "BCM57786");
13622                 else
13623                         goto nomatch;
13624         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13625                 strcpy(tp->board_part_number, "BCM95906");
13626         } else {
13627 nomatch:
13628                 strcpy(tp->board_part_number, "none");
13629         }
13630 }
13631
13632 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13633 {
13634         u32 val;
13635
13636         if (tg3_nvram_read(tp, offset, &val) ||
13637             (val & 0xfc000000) != 0x0c000000 ||
13638             tg3_nvram_read(tp, offset + 4, &val) ||
13639             val != 0)
13640                 return 0;
13641
13642         return 1;
13643 }
13644
13645 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13646 {
13647         u32 val, offset, start, ver_offset;
13648         int i, dst_off;
13649         bool newver = false;
13650
13651         if (tg3_nvram_read(tp, 0xc, &offset) ||
13652             tg3_nvram_read(tp, 0x4, &start))
13653                 return;
13654
13655         offset = tg3_nvram_logical_addr(tp, offset);
13656
13657         if (tg3_nvram_read(tp, offset, &val))
13658                 return;
13659
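        /* Newer bootcode images carry the same signature that
         * tg3_fw_img_is_valid() checks: top bits of the first word equal
         * to 0x0c000000, followed by a zero word.
         */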
13660         if ((val & 0xfc000000) == 0x0c000000) {
13661                 if (tg3_nvram_read(tp, offset + 4, &val))
13662                         return;
13663
13664                 if (val == 0)
13665                         newver = true;
13666         }
13667
13668         dst_off = strlen(tp->fw_ver);
13669
13670         if (newver) {
13671                 if (TG3_VER_SIZE - dst_off < 16 ||
13672                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13673                         return;
13674
13675                 offset = offset + ver_offset - start;
13676                 for (i = 0; i < 16; i += 4) {
13677                         __be32 v;
13678                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13679                                 return;
13680
13681                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13682                 }
13683         } else {
13684                 u32 major, minor;
13685
13686                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13687                         return;
13688
13689                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13690                         TG3_NVM_BCVER_MAJSFT;
13691                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13692                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13693                          "v%d.%02d", major, minor);
13694         }
13695 }
13696
13697 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13698 {
13699         u32 val, major, minor;
13700
13701         /* Use native endian representation */
13702         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13703                 return;
13704
13705         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13706                 TG3_NVM_HWSB_CFG1_MAJSFT;
13707         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13708                 TG3_NVM_HWSB_CFG1_MINSFT;
13709
13710         snprintf(tp->fw_ver, TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13711 }
13712
13713 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13714 {
13715         u32 offset, major, minor, build;
13716
13717         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13718
13719         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13720                 return;
13721
13722         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13723         case TG3_EEPROM_SB_REVISION_0:
13724                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13725                 break;
13726         case TG3_EEPROM_SB_REVISION_2:
13727                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13728                 break;
13729         case TG3_EEPROM_SB_REVISION_3:
13730                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13731                 break;
13732         case TG3_EEPROM_SB_REVISION_4:
13733                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13734                 break;
13735         case TG3_EEPROM_SB_REVISION_5:
13736                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13737                 break;
13738         case TG3_EEPROM_SB_REVISION_6:
13739                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13740                 break;
13741         default:
13742                 return;
13743         }
13744
13745         if (tg3_nvram_read(tp, offset, &val))
13746                 return;
13747
13748         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13749                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13750         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13751                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13752         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13753
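        /* The version string below prints two digits of minor and encodes
         * the build as a letter suffix 'a'..'z'; reject values that do not
         * fit that format.
         */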
13754         if (minor > 99 || build > 26)
13755                 return;
13756
13757         offset = strlen(tp->fw_ver);
13758         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13759                  " v%d.%02d", major, minor);
13760
13761         if (build > 0) {
13762                 offset = strlen(tp->fw_ver);
13763                 if (offset < TG3_VER_SIZE - 1)
13764                         tp->fw_ver[offset] = 'a' + build - 1;
13765         }
13766 }
13767
13768 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13769 {
13770         u32 val, offset, start;
13771         int i, vlen;
13772
13773         for (offset = TG3_NVM_DIR_START;
13774              offset < TG3_NVM_DIR_END;
13775              offset += TG3_NVM_DIRENT_SIZE) {
13776                 if (tg3_nvram_read(tp, offset, &val))
13777                         return;
13778
13779                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13780                         break;
13781         }
13782
13783         if (offset == TG3_NVM_DIR_END)
13784                 return;
13785
13786         if (!tg3_flag(tp, 5705_PLUS))
13787                 start = 0x08000000;
13788         else if (tg3_nvram_read(tp, offset - 4, &start))
13789                 return;
13790
13791         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13792             !tg3_fw_img_is_valid(tp, offset) ||
13793             tg3_nvram_read(tp, offset + 8, &val))
13794                 return;
13795
13796         offset += val - start;
13797
13798         vlen = strlen(tp->fw_ver);
13799
13800         tp->fw_ver[vlen++] = ',';
13801         tp->fw_ver[vlen++] = ' ';
13802
13803         for (i = 0; i < 4; i++) {
13804                 __be32 v;
13805                 if (tg3_nvram_read_be32(tp, offset, &v))
13806                         return;
13807
13808                 offset += sizeof(v);
13809
13810                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13811                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13812                         break;
13813                 }
13814
13815                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13816                 vlen += sizeof(v);
13817         }
13818 }
13819
13820 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13821 {
13822         int vlen;
13823         u32 apedata;
13824         char *fwtype;
13825
13826         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13827                 return;
13828
13829         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13830         if (apedata != APE_SEG_SIG_MAGIC)
13831                 return;
13832
13833         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13834         if (!(apedata & APE_FW_STATUS_READY))
13835                 return;
13836
13837         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13838
13839         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13840                 tg3_flag_set(tp, APE_HAS_NCSI);
13841                 fwtype = "NCSI";
13842         } else {
13843                 fwtype = "DASH";
13844         }
13845
13846         vlen = strlen(tp->fw_ver);
13847
13848         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13849                  fwtype,
13850                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13851                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13852                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13853                  (apedata & APE_FW_VERSION_BLDMSK));
13854 }
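/* A sketch of the unpacking above, assuming the conventional one byte
 * per field layout of TG3_APE_FW_VERSION (major, minor, rev, build from
 * most to least significant; the authoritative masks and shifts are in
 * tg3.h):
 *
 *      apedata = 0x01020304  ->  " NCSI v1.2.3.4" (or " DASH v1.2.3.4")
 */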
13855
13856 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13857 {
13858         u32 val;
13859         bool vpd_vers = false;
13860
13861         if (tp->fw_ver[0] != 0)
13862                 vpd_vers = true;
13863
13864         if (tg3_flag(tp, NO_NVRAM)) {
13865                 strcat(tp->fw_ver, "sb");
13866                 return;
13867         }
13868
13869         if (tg3_nvram_read(tp, 0, &val))
13870                 return;
13871
13872         if (val == TG3_EEPROM_MAGIC)
13873                 tg3_read_bc_ver(tp);
13874         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13875                 tg3_read_sb_ver(tp, val);
13876         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13877                 tg3_read_hwsb_ver(tp);
13878         else
13879                 return;
13880
13881         if (vpd_vers)
13882                 goto done;
13883
13884         if (tg3_flag(tp, ENABLE_APE)) {
13885                 if (tg3_flag(tp, ENABLE_ASF))
13886                         tg3_read_dash_ver(tp);
13887         } else if (tg3_flag(tp, ENABLE_ASF)) {
13888                 tg3_read_mgmtfw_ver(tp);
13889         }
13890
13891 done:
13892         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13893 }
13894
13895 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13896 {
13897         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13898                 return TG3_RX_RET_MAX_SIZE_5717;
13899         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13900                 return TG3_RX_RET_MAX_SIZE_5700;
13901         else
13902                 return TG3_RX_RET_MAX_SIZE_5705;
13903 }
13904
13905 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13906         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13907         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13908         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13909         { },
13910 };
13911
13912 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13913 {
13914         struct pci_dev *peer;
13915         unsigned int func, devnr = tp->pdev->devfn & ~7;
13916
13917         for (func = 0; func < 8; func++) {
13918                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13919                 if (peer && peer != tp->pdev)
13920                         break;
13921                 pci_dev_put(peer);
13922         }
13923         /* 5704 can be configured in single-port mode; set peer to
13924          * tp->pdev in that case.
13925          */
13926         if (!peer) {
13927                 peer = tp->pdev;
13928                 return peer;
13929         }
13930
13931         /*
13932          * We don't need to keep the refcount elevated; there's no way
13933          * to remove one half of this device without removing the other.
13934          */
13935         pci_dev_put(peer);
13936
13937         return peer;
13938 }
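/* devfn packs the slot and function numbers as (slot << 3) | func, so
 * the "& ~7" above strips the function bits.  For example, devfn 0x21
 * (slot 4, function 1) gives devnr 0x20, and the loop probes devfn
 * 0x20..0x27 looking for the sibling port of a dual-port 5704.
 */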
13939
13940 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13941 {
13942         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13943         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13944                 u32 reg;
13945
13946                 /* All devices that use the alternate
13947                  * ASIC REV location have a CPMU.
13948                  */
13949                 tg3_flag_set(tp, CPMU_PRESENT);
13950
13951                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13952                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13953                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13954                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13955                         reg = TG3PCI_GEN2_PRODID_ASICREV;
13956                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13957                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13958                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13959                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13960                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13961                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13962                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13963                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13964                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13965                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13966                         reg = TG3PCI_GEN15_PRODID_ASICREV;
13967                 else
13968                         reg = TG3PCI_PRODID_ASICREV;
13969
13970                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13971         }
13972
13973         /* Wrong chip ID in 5752 A0. This code can be removed later
13974          * as A0 is not in production.
13975          */
13976         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13977                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13978
13979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13980             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13981             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13982                 tg3_flag_set(tp, 5717_PLUS);
13983
13984         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13985             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13986                 tg3_flag_set(tp, 57765_CLASS);
13987
13988         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
13989                 tg3_flag_set(tp, 57765_PLUS);
13990
13991         /* Intentionally exclude ASIC_REV_5906 */
13992         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13993             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13994             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13995             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13996             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13997             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13998             tg3_flag(tp, 57765_PLUS))
13999                 tg3_flag_set(tp, 5755_PLUS);
14000
14001         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14002             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14003                 tg3_flag_set(tp, 5780_CLASS);
14004
14005         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14006             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14007             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14008             tg3_flag(tp, 5755_PLUS) ||
14009             tg3_flag(tp, 5780_CLASS))
14010                 tg3_flag_set(tp, 5750_PLUS);
14011
14012         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14013             tg3_flag(tp, 5750_PLUS))
14014                 tg3_flag_set(tp, 5705_PLUS);
14015 }
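/* The flags set above form a containment chain that later code relies
 * on, so a test of the broadest applicable flag covers all newer parts:
 *
 *      5717_PLUS / 57765_CLASS -> 57765_PLUS -> 5755_PLUS
 *                                     -> 5750_PLUS -> 5705_PLUS
 *
 * 5780_CLASS and 5906 feed into 5750_PLUS directly, without being
 * 5755_PLUS.
 */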
14016
14017 static int __devinit tg3_get_invariants(struct tg3 *tp)
14018 {
14019         u32 misc_ctrl_reg;
14020         u32 pci_state_reg, grc_misc_cfg;
14021         u32 val;
14022         u16 pci_cmd;
14023         int err;
14024
14025         /* Force memory write invalidate off.  If we leave it on,
14026          * then on 5700_BX chips we have to enable a workaround.
14027          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14028          * to match the cacheline size.  The Broadcom driver has this
14029          * workaround but turns MWI off all the time, so it never uses
14030          * it.  This seems to suggest that the workaround is insufficient.
14031          */
14032         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14033         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14034         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14035
14036         /* Important! -- Make sure register accesses are byteswapped
14037          * correctly.  Also, for those chips that require it, make
14038          * sure that indirect register accesses are enabled before
14039          * the first operation.
14040          */
14041         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14042                               &misc_ctrl_reg);
14043         tp->misc_host_ctrl |= (misc_ctrl_reg &
14044                                MISC_HOST_CTRL_CHIPREV);
14045         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14046                                tp->misc_host_ctrl);
14047
14048         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14049
14050         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14051          * we need to disable memory and use config cycles
14052          * only to access all registers. The 5702/03 chips
14053          * can mistakenly decode the special cycles from the
14054          * ICH chipsets as memory write cycles, causing corruption
14055          * of register and memory space. Only certain ICH bridges
14056          * will drive special cycles with non-zero data during the
14057          * address phase which can fall within the 5703's address
14058          * range. This is not an ICH bug as the PCI spec allows
14059          * non-zero address during special cycles. However, only
14060          * these ICH bridges are known to drive non-zero addresses
14061          * during special cycles.
14062          *
14063          * Since special cycles do not cross PCI bridges, we only
14064          * enable this workaround if the 5703 is on the secondary
14065          * bus of these ICH bridges.
14066          */
14067         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14068             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14069                 static struct tg3_dev_id {
14070                         u32     vendor;
14071                         u32     device;
14072                         u32     rev;
14073                 } ich_chipsets[] = {
14074                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14075                           PCI_ANY_ID },
14076                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14077                           PCI_ANY_ID },
14078                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14079                           0xa },
14080                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14081                           PCI_ANY_ID },
14082                         { },
14083                 };
14084                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14085                 struct pci_dev *bridge = NULL;
14086
14087                 while (pci_id->vendor != 0) {
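                /* Note: pci_get_device() always drops the reference on
                 * its 'from' argument, so taking the 'continue' branches
                 * below with 'bridge' still held does not leak it; only
                 * the successful match needs the explicit pci_dev_put().
                 * The same pattern recurs in the bridge scans below.
                 */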
14088                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14089                                                 bridge);
14090                         if (!bridge) {
14091                                 pci_id++;
14092                                 continue;
14093                         }
14094                         if (pci_id->rev != PCI_ANY_ID) {
14095                                 if (bridge->revision > pci_id->rev)
14096                                         continue;
14097                         }
14098                         if (bridge->subordinate &&
14099                             (bridge->subordinate->number ==
14100                              tp->pdev->bus->number)) {
14101                                 tg3_flag_set(tp, ICH_WORKAROUND);
14102                                 pci_dev_put(bridge);
14103                                 break;
14104                         }
14105                 }
14106         }
14107
14108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14109                 static struct tg3_dev_id {
14110                         u32     vendor;
14111                         u32     device;
14112                 } bridge_chipsets[] = {
14113                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14114                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14115                         { },
14116                 };
14117                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14118                 struct pci_dev *bridge = NULL;
14119
14120                 while (pci_id->vendor != 0) {
14121                         bridge = pci_get_device(pci_id->vendor,
14122                                                 pci_id->device,
14123                                                 bridge);
14124                         if (!bridge) {
14125                                 pci_id++;
14126                                 continue;
14127                         }
14128                         if (bridge->subordinate &&
14129                             (bridge->subordinate->number <=
14130                              tp->pdev->bus->number) &&
14131                             (bridge->subordinate->subordinate >=
14132                              tp->pdev->bus->number)) {
14133                                 tg3_flag_set(tp, 5701_DMA_BUG);
14134                                 pci_dev_put(bridge);
14135                                 break;
14136                         }
14137                 }
14138         }
14139
14140         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14141          * DMA addresses > 40 bits.  This bridge may have additional
14142          * 57xx devices behind it in some 4-port NIC designs, for example.
14143          * Any tg3 device found behind the bridge will also need the 40-bit
14144          * DMA workaround.
14145          */
14146         if (tg3_flag(tp, 5780_CLASS)) {
14147                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14148                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14149         } else {
14150                 struct pci_dev *bridge = NULL;
14151
14152                 do {
14153                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14154                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14155                                                 bridge);
14156                         if (bridge && bridge->subordinate &&
14157                             (bridge->subordinate->number <=
14158                              tp->pdev->bus->number) &&
14159                             (bridge->subordinate->subordinate >=
14160                              tp->pdev->bus->number)) {
14161                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14162                                 pci_dev_put(bridge);
14163                                 break;
14164                         }
14165                 } while (bridge);
14166         }
14167
14168         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14169             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14170                 tp->pdev_peer = tg3_find_peer(tp);
14171
14172         /* Determine TSO capabilities */
14173         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14174                 ; /* Do nothing. HW bug. */
14175         else if (tg3_flag(tp, 57765_PLUS))
14176                 tg3_flag_set(tp, HW_TSO_3);
14177         else if (tg3_flag(tp, 5755_PLUS) ||
14178                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14179                 tg3_flag_set(tp, HW_TSO_2);
14180         else if (tg3_flag(tp, 5750_PLUS)) {
14181                 tg3_flag_set(tp, HW_TSO_1);
14182                 tg3_flag_set(tp, TSO_BUG);
14183                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14184                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14185                         tg3_flag_clear(tp, TSO_BUG);
14186         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14187                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14188                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14189                 tg3_flag_set(tp, TSO_BUG);
14190                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14191                         tp->fw_needed = FIRMWARE_TG3TSO5;
14192                 else
14193                         tp->fw_needed = FIRMWARE_TG3TSO;
14194         }
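        /* To summarize the selection above: 57765_PLUS parts use the
         * third-generation hardware TSO engine, 5755_PLUS and the 5906
         * the second, remaining 5750_PLUS parts the first, and older
         * chips fall back to firmware TSO (FIRMWARE_TG3TSO or
         * FIRMWARE_TG3TSO5).  TSO_BUG marks parts whose TSO needs a
         * workaround; it is cleared again only on 5750 rev C2 and newer.
         */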
14195
14196         /* Selectively allow TSO based on operating conditions */
14197         if (tg3_flag(tp, HW_TSO_1) ||
14198             tg3_flag(tp, HW_TSO_2) ||
14199             tg3_flag(tp, HW_TSO_3) ||
14200             tp->fw_needed) {
14201                 /* For firmware TSO, assume ASF is disabled.
14202                  * We'll disable TSO later if we discover ASF
14203                  * is enabled in tg3_get_eeprom_hw_cfg().
14204                  */
14205                 tg3_flag_set(tp, TSO_CAPABLE);
14206         } else {
14207                 tg3_flag_clear(tp, TSO_CAPABLE);
14208                 tg3_flag_clear(tp, TSO_BUG);
14209                 tp->fw_needed = NULL;
14210         }
14211
14212         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14213                 tp->fw_needed = FIRMWARE_TG3;
14214
14215         tp->irq_max = 1;
14216
14217         if (tg3_flag(tp, 5750_PLUS)) {
14218                 tg3_flag_set(tp, SUPPORT_MSI);
14219                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14220                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14221                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14222                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14223                      tp->pdev_peer == tp->pdev))
14224                         tg3_flag_clear(tp, SUPPORT_MSI);
14225
14226                 if (tg3_flag(tp, 5755_PLUS) ||
14227                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14228                         tg3_flag_set(tp, 1SHOT_MSI);
14229                 }
14230
14231                 if (tg3_flag(tp, 57765_PLUS)) {
14232                         tg3_flag_set(tp, SUPPORT_MSIX);
14233                         tp->irq_max = TG3_IRQ_MAX_VECS;
14234                         tg3_rss_init_dflt_indir_tbl(tp);
14235                 }
14236         }
14237
14238         if (tg3_flag(tp, 5755_PLUS))
14239                 tg3_flag_set(tp, SHORT_DMA_BUG);
14240
14241         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14242                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14243
14244         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14245             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14246             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14247                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14248
14249         if (tg3_flag(tp, 57765_PLUS) &&
14250             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14251                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14252
14253         if (!tg3_flag(tp, 5705_PLUS) ||
14254             tg3_flag(tp, 5780_CLASS) ||
14255             tg3_flag(tp, USE_JUMBO_BDFLAG))
14256                 tg3_flag_set(tp, JUMBO_CAPABLE);
14257
14258         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14259                               &pci_state_reg);
14260
14261         if (pci_is_pcie(tp->pdev)) {
14262                 u16 lnkctl;
14263
14264                 tg3_flag_set(tp, PCI_EXPRESS);
14265
14266                 pci_read_config_word(tp->pdev,
14267                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14268                                      &lnkctl);
14269                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14270                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14271                             ASIC_REV_5906) {
14272                                 tg3_flag_clear(tp, HW_TSO_2);
14273                                 tg3_flag_clear(tp, TSO_CAPABLE);
14274                         }
14275                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14276                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14277                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14278                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14279                                 tg3_flag_set(tp, CLKREQ_BUG);
14280                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14281                         tg3_flag_set(tp, L1PLLPD_EN);
14282                 }
14283         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14284                 /* BCM5785 devices are effectively PCIe devices, and should
14285                  * follow PCIe codepaths, but do not have a PCIe capabilities
14286                  * section.
14287                  */
14288                 tg3_flag_set(tp, PCI_EXPRESS);
14289         } else if (!tg3_flag(tp, 5705_PLUS) ||
14290                    tg3_flag(tp, 5780_CLASS)) {
14291                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14292                 if (!tp->pcix_cap) {
14293                         dev_err(&tp->pdev->dev,
14294                                 "Cannot find PCI-X capability, aborting\n");
14295                         return -EIO;
14296                 }
14297
14298                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14299                         tg3_flag_set(tp, PCIX_MODE);
14300         }
14301
14302         /* If we have an AMD 762 or VIA K8T800 chipset, write
14303          * reordering to the mailbox registers done by the host
14304          * controller can cause major trouble.  We read back from
14305          * every mailbox register write to force the writes to be
14306          * posted to the chip in order.
14307          */
14308         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14309             !tg3_flag(tp, PCI_EXPRESS))
14310                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
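        /* The read-back flush referred to above is what
         * tg3_write_flush_reg32() (installed further down) implements;
         * a minimal sketch of the pattern:
         *
         *      writel(val, tp->regs + off);
         *      readl(tp->regs + off);      <-- cannot complete before the
         *                                      write reaches the chip
         *
         * which defeats any write reordering in the host bridge.
         */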
14311
14312         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14313                              &tp->pci_cacheline_sz);
14314         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14315                              &tp->pci_lat_timer);
14316         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14317             tp->pci_lat_timer < 64) {
14318                 tp->pci_lat_timer = 64;
14319                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14320                                       tp->pci_lat_timer);
14321         }
14322
14323         /* Important! -- It is critical that the PCI-X hw workaround
14324          * situation is decided before the first MMIO register access.
14325          */
14326         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14327                 /* 5700 BX chips need to have their TX producer index
14328                  * mailboxes written twice to workaround a bug.
14329                  */
14330                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14331
14332                 /* If we are in PCI-X mode, enable register write workaround.
14333                  *
14334                  * The workaround is to use indirect register accesses
14335                  * for all chip writes not to mailbox registers.
14336                  */
14337                 if (tg3_flag(tp, PCIX_MODE)) {
14338                         u32 pm_reg;
14339
14340                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14341
14342                          /* The chip can have its power management PCI config
14343                          * space registers clobbered due to this bug.
14344                          * So explicitly force the chip into D0 here.
14345                          */
14346                         pci_read_config_dword(tp->pdev,
14347                                               tp->pm_cap + PCI_PM_CTRL,
14348                                               &pm_reg);
14349                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14350                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14351                         pci_write_config_dword(tp->pdev,
14352                                                tp->pm_cap + PCI_PM_CTRL,
14353                                                pm_reg);
14354
14355                         /* Also, force SERR#/PERR# in PCI command. */
14356                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14357                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14358                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14359                 }
14360         }
14361
14362         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14363                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14364         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14365                 tg3_flag_set(tp, PCI_32BIT);
14366
14367         /* Chip-specific fixup from Broadcom driver */
14368         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14369             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14370                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14371                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14372         }
14373
14374         /* Default fast path register access methods */
14375         tp->read32 = tg3_read32;
14376         tp->write32 = tg3_write32;
14377         tp->read32_mbox = tg3_read32;
14378         tp->write32_mbox = tg3_write32;
14379         tp->write32_tx_mbox = tg3_write32;
14380         tp->write32_rx_mbox = tg3_write32;
14381
14382         /* Various workaround register access methods */
14383         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14384                 tp->write32 = tg3_write_indirect_reg32;
14385         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14386                  (tg3_flag(tp, PCI_EXPRESS) &&
14387                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14388                 /*
14389                  * Back to back register writes can cause problems on these
14390                  * chips, the workaround is to read back all reg writes
14391                  * except those to mailbox regs.
14392                  *
14393                  * See tg3_write_indirect_reg32().
14394                  */
14395                 tp->write32 = tg3_write_flush_reg32;
14396         }
14397
14398         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14399                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14400                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14401                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14402         }
14403
14404         if (tg3_flag(tp, ICH_WORKAROUND)) {
14405                 tp->read32 = tg3_read_indirect_reg32;
14406                 tp->write32 = tg3_write_indirect_reg32;
14407                 tp->read32_mbox = tg3_read_indirect_mbox;
14408                 tp->write32_mbox = tg3_write_indirect_mbox;
14409                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14410                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14411
14412                 iounmap(tp->regs);
14413                 tp->regs = NULL;
14414
14415                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14416                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14417                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14418         }
14419         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14420                 tp->read32_mbox = tg3_read32_mbox_5906;
14421                 tp->write32_mbox = tg3_write32_mbox_5906;
14422                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14423                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14424         }
14425
14426         if (tp->write32 == tg3_write_indirect_reg32 ||
14427             (tg3_flag(tp, PCIX_MODE) &&
14428              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14429               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14430                 tg3_flag_set(tp, SRAM_USE_CONFIG);
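        /* With SRAM_USE_CONFIG set, SRAM is reached through the PCI
         * config space memory window (the TG3PCI_MEM_WIN_BASE_ADDR /
         * TG3PCI_MEM_WIN_DATA pair, the same pattern tg3_do_test_dma()
         * uses further down) instead of through direct MMIO.
         */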
14431
14432         /* The memory arbiter has to be enabled in order for SRAM accesses
14433          * to succeed.  Normally on powerup the tg3 chip firmware will make
14434          * sure it is enabled, but other entities such as system netboot
14435          * code might disable it.
14436          */
14437         val = tr32(MEMARB_MODE);
14438         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14439
14440         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14441         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14442             tg3_flag(tp, 5780_CLASS)) {
14443                 if (tg3_flag(tp, PCIX_MODE)) {
14444                         pci_read_config_dword(tp->pdev,
14445                                               tp->pcix_cap + PCI_X_STATUS,
14446                                               &val);
14447                         tp->pci_fn = val & 0x7;
14448                 }
14449         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14450                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14451                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14452                     NIC_SRAM_CPMUSTAT_SIG) {
14453                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14454                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14455                 }
14456         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14457                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14458                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14459                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14460                     NIC_SRAM_CPMUSTAT_SIG) {
14461                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14462                                      TG3_CPMU_STATUS_FSHFT_5719;
14463                 }
14464         }
14465
14466         /* Get eeprom hw config before calling tg3_set_power_state().
14467          * In particular, the TG3_FLAG_IS_NIC flag must be
14468          * determined before calling tg3_set_power_state() so that
14469          * we know whether or not to switch out of Vaux power.
14470          * When the flag is set, it means that GPIO1 is used for eeprom
14471          * write protect and also implies that it is a LOM where GPIOs
14472          * are not used to switch power.
14473          */
14474         tg3_get_eeprom_hw_cfg(tp);
14475
14476         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14477                 tg3_flag_clear(tp, TSO_CAPABLE);
14478                 tg3_flag_clear(tp, TSO_BUG);
14479                 tp->fw_needed = NULL;
14480         }
14481
14482         if (tg3_flag(tp, ENABLE_APE)) {
14483                 /* Allow reads and writes to the
14484                  * APE register and memory space.
14485                  */
14486                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14487                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14488                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14489                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14490                                        pci_state_reg);
14491
14492                 tg3_ape_lock_init(tp);
14493         }
14494
14495         /* Set up tp->grc_local_ctrl before calling
14496          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14497          * will bring 5700's external PHY out of reset.
14498          * It is also used as eeprom write protect on LOMs.
14499          */
14500         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14502             tg3_flag(tp, EEPROM_WRITE_PROT))
14503                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14504                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14505         /* Unused GPIO3 must be driven as output on 5752 because there
14506          * are no pull-up resistors on unused GPIO pins.
14507          */
14508         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14509                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14510
14511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14512             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14513             tg3_flag(tp, 57765_CLASS))
14514                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14515
14516         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14517             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14518                 /* Turn off the debug UART. */
14519                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14520                 if (tg3_flag(tp, IS_NIC))
14521                         /* Keep VMain power. */
14522                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14523                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14524         }
14525
14526         /* Switch out of Vaux if it is a NIC */
14527         tg3_pwrsrc_switch_to_vmain(tp);
14528
14529         /* Derive initial jumbo mode from MTU assigned in
14530          * ether_setup() via the alloc_etherdev() call
14531          */
14532         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14533                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14534
14535         /* Determine WakeOnLan speed to use. */
14536         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14537             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14538             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14539             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14540                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14541         } else {
14542                 tg3_flag_set(tp, WOL_SPEED_100MB);
14543         }
14544
14545         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14546                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14547
14548         /* A few boards don't want Ethernet@WireSpeed phy feature */
14549         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14550             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14551              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14552              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14553             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14554             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14555                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14556
14557         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14558             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14559                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14560         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14561                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14562
14563         if (tg3_flag(tp, 5705_PLUS) &&
14564             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14565             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14566             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14567             !tg3_flag(tp, 57765_PLUS)) {
14568                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14569                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14570                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14571                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14572                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14573                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14574                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14575                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14576                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14577                 } else
14578                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14579         }
14580
14581         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14582             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14583                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14584                 if (tp->phy_otp == 0)
14585                         tp->phy_otp = TG3_OTP_DEFAULT;
14586         }
14587
14588         if (tg3_flag(tp, CPMU_PRESENT))
14589                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14590         else
14591                 tp->mi_mode = MAC_MI_MODE_BASE;
14592
14593         tp->coalesce_mode = 0;
14594         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14595             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14596                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14597
14598         /* Set these bits to enable the statistics workaround. */
14599         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14600             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14601             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14602                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14603                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14604         }
14605
14606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14607             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14608                 tg3_flag_set(tp, USE_PHYLIB);
14609
14610         err = tg3_mdio_init(tp);
14611         if (err)
14612                 return err;
14613
14614         /* Initialize data/descriptor byte/word swapping. */
14615         val = tr32(GRC_MODE);
14616         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14617                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14618                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14619                         GRC_MODE_B2HRX_ENABLE |
14620                         GRC_MODE_HTX2B_ENABLE |
14621                         GRC_MODE_HOST_STACKUP);
14622         else
14623                 val &= GRC_MODE_HOST_STACKUP;
14624
14625         tw32(GRC_MODE, val | tp->grc_mode);
14626
14627         tg3_switch_clocks(tp);
14628
14629         /* Clear this out for sanity. */
14630         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14631
14632         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14633                               &pci_state_reg);
14634         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14635             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14636                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14637
14638                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14639                     chiprevid == CHIPREV_ID_5701_B0 ||
14640                     chiprevid == CHIPREV_ID_5701_B2 ||
14641                     chiprevid == CHIPREV_ID_5701_B5) {
14642                         void __iomem *sram_base;
14643
14644                         /* Write some dummy words into the SRAM status block
14645                          * area and see if they read back correctly.  If the
14646                          * readback is bad, force-enable the PCI-X workaround.
14647                          */
14648                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14649
14650                         writel(0x00000000, sram_base);
14651                         writel(0x00000000, sram_base + 4);
14652                         writel(0xffffffff, sram_base + 4);
14653                         if (readl(sram_base) != 0x00000000)
14654                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14655                 }
14656         }
14657
14658         udelay(50);
14659         tg3_nvram_init(tp);
14660
14661         grc_misc_cfg = tr32(GRC_MISC_CFG);
14662         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14663
14664         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14665             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14666              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14667                 tg3_flag_set(tp, IS_5788);
14668
14669         if (!tg3_flag(tp, IS_5788) &&
14670             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14671                 tg3_flag_set(tp, TAGGED_STATUS);
14672         if (tg3_flag(tp, TAGGED_STATUS)) {
14673                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14674                                       HOSTCC_MODE_CLRTICK_TXBD);
14675
14676                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14677                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14678                                        tp->misc_host_ctrl);
14679         }
14680
14681         /* Preserve the APE MAC_MODE bits */
14682         if (tg3_flag(tp, ENABLE_APE))
14683                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14684         else
14685                 tp->mac_mode = 0;
14686
14687         /* these are limited to 10/100 only */
14688         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14689              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14690             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14691              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14692              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14693               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14694               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14695             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14696              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14697               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14698               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14699             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14700             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14701             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14702             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14703                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14704
14705         err = tg3_phy_probe(tp);
14706         if (err) {
14707                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14708                 /* ... but do not return immediately ... */
14709                 tg3_mdio_fini(tp);
14710         }
14711
14712         tg3_read_vpd(tp);
14713         tg3_read_fw_ver(tp);
14714
14715         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14716                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14717         } else {
14718                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14719                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14720                 else
14721                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14722         }
14723
14724         /* 5700 {AX,BX} chips have a broken status block link
14725          * change bit implementation, so we must use the
14726          * status register in those cases.
14727          */
14728         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14729                 tg3_flag_set(tp, USE_LINKCHG_REG);
14730         else
14731                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14732
14733         /* The led_ctrl is set during tg3_phy_probe; here we might
14734          * have to force the link status polling mechanism based
14735          * upon subsystem IDs.
14736          */
14737         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14738             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14739             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14740                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14741                 tg3_flag_set(tp, USE_LINKCHG_REG);
14742         }
14743
14744         /* For all SERDES we poll the MAC status register. */
14745         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14746                 tg3_flag_set(tp, POLL_SERDES);
14747         else
14748                 tg3_flag_clear(tp, POLL_SERDES);
14749
14750         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14751         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14752         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14753             tg3_flag(tp, PCIX_MODE)) {
14754                 tp->rx_offset = NET_SKB_PAD;
14755 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14756                 tp->rx_copy_thresh = ~(u16)0;
14757 #endif
14758         }
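        /* The 5701 in PCI-X mode cannot use the usual 2-byte receive
         * buffer offset that aligns the IP header, so rx_offset loses
         * NET_IP_ALIGN there.  On architectures without efficient
         * unaligned access, the ~(u16)0 copy threshold then forces every
         * packet to be copied, restoring the alignment in the copy.
         */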
14759
14760         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14761         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14762         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14763
14764         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14765
14766         /* Increment the rx prod index on the rx std ring by at most
14767          * 8 for these chips to work around hw errata.
14768          */
14769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14770             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14771             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14772                 tp->rx_std_max_post = 8;
14773
14774         if (tg3_flag(tp, ASPM_WORKAROUND))
14775                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14776                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14777
14778         return err;
14779 }
14780
14781 #ifdef CONFIG_SPARC
14782 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14783 {
14784         struct net_device *dev = tp->dev;
14785         struct pci_dev *pdev = tp->pdev;
14786         struct device_node *dp = pci_device_to_OF_node(pdev);
14787         const unsigned char *addr;
14788         int len;
14789
14790         addr = of_get_property(dp, "local-mac-address", &len);
14791         if (addr && len == 6) {
14792                 memcpy(dev->dev_addr, addr, 6);
14793                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14794                 return 0;
14795         }
14796         return -ENODEV;
14797 }
14798
14799 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14800 {
14801         struct net_device *dev = tp->dev;
14802
14803         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14804         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14805         return 0;
14806 }
14807 #endif
14808
14809 static int __devinit tg3_get_device_address(struct tg3 *tp)
14810 {
14811         struct net_device *dev = tp->dev;
14812         u32 hi, lo, mac_offset;
14813         int addr_ok = 0;
14814
14815 #ifdef CONFIG_SPARC
14816         if (!tg3_get_macaddr_sparc(tp))
14817                 return 0;
14818 #endif
14819
14820         mac_offset = 0x7c;
14821         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14822             tg3_flag(tp, 5780_CLASS)) {
14823                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14824                         mac_offset = 0xcc;
14825                 if (tg3_nvram_lock(tp))
14826                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14827                 else
14828                         tg3_nvram_unlock(tp);
14829         } else if (tg3_flag(tp, 5717_PLUS)) {
14830                 if (tp->pci_fn & 1)
14831                         mac_offset = 0xcc;
14832                 if (tp->pci_fn > 1)
14833                         mac_offset += 0x18c;
14834         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14835                 mac_offset = 0x10;
14836
14837         /* First try to get it from MAC address mailbox. */
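        /* Bootcode stamps the mailbox with an ASCII "HK" (0x484b)
         * signature in the top 16 bits of the high word when it has
         * written an address there.
         */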
14838         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14839         if ((hi >> 16) == 0x484b) {
14840                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14841                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14842
14843                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14844                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14845                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14846                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14847                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14848
14849                 /* Some old bootcode may report a 0 MAC address in SRAM */
14850                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14851         }
14852         if (!addr_ok) {
14853                 /* Next, try NVRAM. */
14854                 if (!tg3_flag(tp, NO_NVRAM) &&
14855                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14856                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14857                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14858                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14859                 }
14860                 /* Finally just fetch it out of the MAC control regs. */
14861                 else {
14862                         hi = tr32(MAC_ADDR_0_HIGH);
14863                         lo = tr32(MAC_ADDR_0_LOW);
14864
14865                         dev->dev_addr[5] = lo & 0xff;
14866                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14867                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14868                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14869                         dev->dev_addr[1] = hi & 0xff;
14870                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14871                 }
14872         }
14873
14874         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14875 #ifdef CONFIG_SPARC
14876                 if (!tg3_get_default_macaddr_sparc(tp))
14877                         return 0;
14878 #endif
14879                 return -EINVAL;
14880         }
14881         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14882         return 0;
14883 }
14884
14885 #define BOUNDARY_SINGLE_CACHELINE       1
14886 #define BOUNDARY_MULTI_CACHELINE        2
14887
14888 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14889 {
14890         int cacheline_size;
14891         u8 byte;
14892         int goal;
14893
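        /* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence the
         * multiply by four below to get bytes.  Zero means the register
         * was never programmed, in which case the largest boundary
         * (1024 bytes) is assumed as a safe default.
         */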
14894         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14895         if (byte == 0)
14896                 cacheline_size = 1024;
14897         else
14898                 cacheline_size = (int) byte * 4;
14899
14900         /* On 5703 and later chips, the boundary bits have no
14901          * effect.
14902          */
14903         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14904             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14905             !tg3_flag(tp, PCI_EXPRESS))
14906                 goto out;
14907
14908 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14909         goal = BOUNDARY_MULTI_CACHELINE;
14910 #else
14911 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14912         goal = BOUNDARY_SINGLE_CACHELINE;
14913 #else
14914         goal = 0;
14915 #endif
14916 #endif
14917
14918         if (tg3_flag(tp, 57765_PLUS)) {
14919                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14920                 goto out;
14921         }
14922
14923         if (!goal)
14924                 goto out;
14925
14926         /* PCI controllers on most RISC systems tend to disconnect
14927          * when a device tries to burst across a cache-line boundary.
14928          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14929          *
14930          * Unfortunately, for PCI-E there are only limited
14931          * write-side controls for this, and thus for reads
14932          * we will still get the disconnects.  We'll also waste
14933          * these PCI cycles for both read and write for chips
14934          * other than 5700 and 5701 which do not implement the
14935          * boundary bits.
14936          */
14937         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14938                 switch (cacheline_size) {
14939                 case 16:
14940                 case 32:
14941                 case 64:
14942                 case 128:
14943                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14944                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14945                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14946                         } else {
14947                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14948                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14949                         }
14950                         break;
14951
14952                 case 256:
14953                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14954                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14955                         break;
14956
14957                 default:
14958                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14959                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14960                         break;
14961                 }
14962         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14963                 switch (cacheline_size) {
14964                 case 16:
14965                 case 32:
14966                 case 64:
14967                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14968                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14969                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14970                                 break;
14971                         }
14972                         /* fallthrough */
14973                 case 128:
14974                 default:
14975                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14976                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14977                         break;
14978                 }
14979         } else {
14980                 switch (cacheline_size) {
14981                 case 16:
14982                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14983                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14984                                         DMA_RWCTRL_WRITE_BNDRY_16);
14985                                 break;
14986                         }
14987                         /* fallthrough */
14988                 case 32:
14989                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14990                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14991                                         DMA_RWCTRL_WRITE_BNDRY_32);
14992                                 break;
14993                         }
14994                         /* fallthrough */
14995                 case 64:
14996                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14997                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14998                                         DMA_RWCTRL_WRITE_BNDRY_64);
14999                                 break;
15000                         }
15001                         /* fallthrough */
15002                 case 128:
15003                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15004                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15005                                         DMA_RWCTRL_WRITE_BNDRY_128);
15006                                 break;
15007                         }
15008                         /* fallthrough */
15009                 case 256:
15010                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15011                                 DMA_RWCTRL_WRITE_BNDRY_256);
15012                         break;
15013                 case 512:
15014                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15015                                 DMA_RWCTRL_WRITE_BNDRY_512);
15016                         break;
15017                 case 1024:
15018                 default:
15019                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15020                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15021                         break;
15022                 }
15023         }
15024
15025 out:
15026         return val;
15027 }
15028
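/* Exercise a single host <-> NIC DMA transfer of 'size' bytes.  One
 * internal buffer descriptor pointing at 'buf_dma' is written into NIC
 * SRAM through the PCI memory window and enqueued on the high-priority
 * read or write DMA FTQ, depending on 'to_device'.  Returns 0 once the
 * completion FIFO reports the descriptor back, -ENODEV on timeout.
 */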
15029 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15030 {
15031         struct tg3_internal_buffer_desc test_desc;
15032         u32 sram_dma_descs;
15033         int i, ret;
15034
15035         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15036
15037         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15038         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15039         tw32(RDMAC_STATUS, 0);
15040         tw32(WDMAC_STATUS, 0);
15041
15042         tw32(BUFMGR_MODE, 0);
15043         tw32(FTQ_RESET, 0);
15044
15045         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15046         test_desc.addr_lo = buf_dma & 0xffffffff;
15047         test_desc.nic_mbuf = 0x00002100;
15048         test_desc.len = size;
15049
15050         /*
15051          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15052          * the *second* time the tg3 driver was getting loaded after an
15053          * initial scan.
15054          *
15055          * Broadcom tells me:
15056          *   ...the DMA engine is connected to the GRC block and a DMA
15057          *   reset may affect the GRC block in some unpredictable way...
15058          *   The behavior of resets to individual blocks has not been tested.
15059          *
15060          * Broadcom noted the GRC reset will also reset all sub-components.
15061          */
15062         if (to_device) {
15063                 test_desc.cqid_sqid = (13 << 8) | 2;
15064
15065                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15066                 udelay(40);
15067         } else {
15068                 test_desc.cqid_sqid = (16 << 8) | 7;
15069
15070                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15071                 udelay(40);
15072         }
15073         test_desc.flags = 0x00000005;
15074
15075         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15076                 u32 val;
15077
15078                 val = *(((u32 *)&test_desc) + i);
15079                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15080                                        sram_dma_descs + (i * sizeof(u32)));
15081                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15082         }
15083         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15084
15085         if (to_device)
15086                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15087         else
15088                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15089
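        /* Poll the completion FIFO for up to 4 ms (40 x 100 us), waiting
         * for the descriptor we just enqueued to be reported back.
         */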
15090         ret = -ENODEV;
15091         for (i = 0; i < 40; i++) {
15092                 u32 val;
15093
15094                 if (to_device)
15095                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15096                 else
15097                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15098                 if ((val & 0xffff) == sram_dma_descs) {
15099                         ret = 0;
15100                         break;
15101                 }
15102
15103                 udelay(100);
15104         }
15105
15106         return ret;
15107 }
15108
15109 #define TEST_BUFFER_SIZE        0x2000
15110
15111 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15112         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15113         { },
15114 };
15115
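/* Compute the DMA read/write control value appropriate for the bus the
 * chip sits on and, on 5700/5701 only, run a write/read DMA self test
 * against a coherent buffer, falling back to a 16-byte write boundary
 * if the data comes back corrupted.
 */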
15116 static int __devinit tg3_test_dma(struct tg3 *tp)
15117 {
15118         dma_addr_t buf_dma;
15119         u32 *buf, saved_dma_rwctrl;
15120         int ret = 0;
15121
15122         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15123                                  &buf_dma, GFP_KERNEL);
15124         if (!buf) {
15125                 ret = -ENOMEM;
15126                 goto out_nofree;
15127         }
15128
15129         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15130                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15131
15132         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15133
15134         if (tg3_flag(tp, 57765_PLUS))
15135                 goto out;
15136
15137         if (tg3_flag(tp, PCI_EXPRESS)) {
15138                 /* DMA read watermark not used on PCIE */
15139                 tp->dma_rwctrl |= 0x00180000;
15140         } else if (!tg3_flag(tp, PCIX_MODE)) {
15141                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15142                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15143                         tp->dma_rwctrl |= 0x003f0000;
15144                 else
15145                         tp->dma_rwctrl |= 0x003f000f;
15146         } else {
15147                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15148                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15149                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15150                         u32 read_water = 0x7;
15151
15152                         /* If the 5704 is behind the EPB bridge, we can
15153                          * do the less restrictive ONE_DMA workaround for
15154                          * better performance.
15155                          */
15156                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15157                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15158                                 tp->dma_rwctrl |= 0x8000;
15159                         else if (ccval == 0x6 || ccval == 0x7)
15160                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15161
15162                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15163                                 read_water = 4;
15164                         /* Set bit 23 to enable PCIX hw bug fix */
15165                         tp->dma_rwctrl |=
15166                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15167                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15168                                 (1 << 23);
15169                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15170                         /* 5780 always in PCIX mode */
15171                         tp->dma_rwctrl |= 0x00144000;
15172                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15173                         /* 5714 always in PCIX mode */
15174                         tp->dma_rwctrl |= 0x00148000;
15175                 } else {
15176                         tp->dma_rwctrl |= 0x001b000f;
15177                 }
15178         }
15179
15180         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15181             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15182                 tp->dma_rwctrl &= 0xfffffff0;
15183
15184         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15185             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15186                 /* Remove this if it causes problems for some boards. */
15187                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15188
15189                 /* On 5700/5701 chips, we need to set this bit.
15190                  * Otherwise the chip will issue cacheline transactions
15191                  * to streamable DMA memory with not all the byte
15192                  * enables turned on.  This is an error on several
15193                  * RISC PCI controllers, in particular sparc64.
15194                  *
15195                  * On 5703/5704 chips, this bit has been reassigned
15196                  * a different meaning.  In particular, it is used
15197                  * on those chips to enable a PCI-X workaround.
15198                  */
15199                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15200         }
15201
15202         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15203
15204 #if 0
15205         /* Unneeded, already done by tg3_get_invariants.  */
15206         tg3_switch_clocks(tp);
15207 #endif
15208
15209         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15210             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15211                 goto out;
15212
15213         /* It is best to perform the DMA test with the maximum write
15214          * burst size to expose the 5700/5701 write DMA bug.
15215          */
15216         saved_dma_rwctrl = tp->dma_rwctrl;
15217         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15218         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15219
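        /* Fill the buffer with a known pattern, DMA it to the chip and
         * back again, then verify.  On a mismatch, retry once with the
         * conservative 16-byte write boundary before declaring -ENODEV.
         */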
15220         while (1) {
15221                 u32 *p = buf, i;
15222
15223                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15224                         p[i] = i;
15225
15226                 /* Send the buffer to the chip. */
15227                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15228                 if (ret) {
15229                         dev_err(&tp->pdev->dev,
15230                                 "%s: Buffer write failed. err = %d\n",
15231                                 __func__, ret);
15232                         break;
15233                 }
15234
15235 #if 0
15236                 /* validate data reached card RAM correctly. */
15237                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15238                         u32 val;
15239                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15240                         if (le32_to_cpu(val) != p[i]) {
15241                                 dev_err(&tp->pdev->dev,
15242                                         "%s: Buffer corrupted on device! "
15243                                         "(%u != %u)\n", __func__, le32_to_cpu(val), p[i]);
15244                                 /* ret = -ENODEV here? */
15245                         }
15246                         p[i] = 0;
15247                 }
15248 #endif
15249                 /* Now read it back. */
15250                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15251                 if (ret) {
15252                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15253                                 "err = %d\n", __func__, ret);
15254                         break;
15255                 }
15256
15257                 /* Verify it. */
15258                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15259                         if (p[i] == i)
15260                                 continue;
15261
15262                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15263                             DMA_RWCTRL_WRITE_BNDRY_16) {
15264                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15265                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15266                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15267                                 break;
15268                         } else {
15269                                 dev_err(&tp->pdev->dev,
15270                                         "%s: Buffer corrupted on read back! "
15271                                         "(%d != %d)\n", __func__, p[i], i);
15272                                 ret = -ENODEV;
15273                                 goto out;
15274                         }
15275                 }
15276
15277                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15278                         /* Success. */
15279                         ret = 0;
15280                         break;
15281                 }
15282         }
15283         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15284             DMA_RWCTRL_WRITE_BNDRY_16) {
15285                 /* DMA test passed without adjusting DMA boundary,
15286                  * now look for chipsets that are known to expose the
15287                  * DMA bug without failing the test.
15288                  */
15289                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15290                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15291                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15292                 } else {
15293                         /* Safe to use the calculated DMA boundary. */
15294                         tp->dma_rwctrl = saved_dma_rwctrl;
15295                 }
15296
15297                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15298         }
15299
15300 out:
15301         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15302 out_nofree:
15303         return ret;
15304 }
15305
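/* Pick the buffer manager mbuf watermarks by chip generation (57765+,
 * 5705+, or the original 5700 family), with separate sets for standard
 * and jumbo frames; the DMA watermarks use common defaults.
 */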
15306 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15307 {
15308         if (tg3_flag(tp, 57765_PLUS)) {
15309                 tp->bufmgr_config.mbuf_read_dma_low_water =
15310                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15311                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15312                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15313                 tp->bufmgr_config.mbuf_high_water =
15314                         DEFAULT_MB_HIGH_WATER_57765;
15315
15316                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15317                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15318                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15319                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15320                 tp->bufmgr_config.mbuf_high_water_jumbo =
15321                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15322         } else if (tg3_flag(tp, 5705_PLUS)) {
15323                 tp->bufmgr_config.mbuf_read_dma_low_water =
15324                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15325                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15326                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15327                 tp->bufmgr_config.mbuf_high_water =
15328                         DEFAULT_MB_HIGH_WATER_5705;
15329                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15330                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15331                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15332                         tp->bufmgr_config.mbuf_high_water =
15333                                 DEFAULT_MB_HIGH_WATER_5906;
15334                 }
15335
15336                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15337                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15338                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15339                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15340                 tp->bufmgr_config.mbuf_high_water_jumbo =
15341                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15342         } else {
15343                 tp->bufmgr_config.mbuf_read_dma_low_water =
15344                         DEFAULT_MB_RDMA_LOW_WATER;
15345                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15346                         DEFAULT_MB_MACRX_LOW_WATER;
15347                 tp->bufmgr_config.mbuf_high_water =
15348                         DEFAULT_MB_HIGH_WATER;
15349
15350                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15351                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15352                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15353                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15354                 tp->bufmgr_config.mbuf_high_water_jumbo =
15355                         DEFAULT_MB_HIGH_WATER_JUMBO;
15356         }
15357
15358         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15359         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15360 }
15361
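/* Map the hardware PHY ID to a human-readable name for the probe log. */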
15362 static char * __devinit tg3_phy_string(struct tg3 *tp)
15363 {
15364         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15365         case TG3_PHY_ID_BCM5400:        return "5400";
15366         case TG3_PHY_ID_BCM5401:        return "5401";
15367         case TG3_PHY_ID_BCM5411:        return "5411";
15368         case TG3_PHY_ID_BCM5701:        return "5701";
15369         case TG3_PHY_ID_BCM5703:        return "5703";
15370         case TG3_PHY_ID_BCM5704:        return "5704";
15371         case TG3_PHY_ID_BCM5705:        return "5705";
15372         case TG3_PHY_ID_BCM5750:        return "5750";
15373         case TG3_PHY_ID_BCM5752:        return "5752";
15374         case TG3_PHY_ID_BCM5714:        return "5714";
15375         case TG3_PHY_ID_BCM5780:        return "5780";
15376         case TG3_PHY_ID_BCM5755:        return "5755";
15377         case TG3_PHY_ID_BCM5787:        return "5787";
15378         case TG3_PHY_ID_BCM5784:        return "5784";
15379         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15380         case TG3_PHY_ID_BCM5906:        return "5906";
15381         case TG3_PHY_ID_BCM5761:        return "5761";
15382         case TG3_PHY_ID_BCM5718C:       return "5718C";
15383         case TG3_PHY_ID_BCM5718S:       return "5718S";
15384         case TG3_PHY_ID_BCM57765:       return "57765";
15385         case TG3_PHY_ID_BCM5719C:       return "5719C";
15386         case TG3_PHY_ID_BCM5720C:       return "5720C";
15387         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15388         case 0:                 return "serdes";
15389         default:                return "unknown";
15390         }
15391 }
15392
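/* Describe bus type, clock speed and width in 'str' for the probe log.
 * The caller provides the buffer; tg3_init_one() passes a 40-byte one.
 */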
15393 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15394 {
15395         if (tg3_flag(tp, PCI_EXPRESS)) {
15396                 strcpy(str, "PCI Express");
15397                 return str;
15398         } else if (tg3_flag(tp, PCIX_MODE)) {
15399                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15400
15401                 strcpy(str, "PCIX:");
15402
15403                 if ((clock_ctrl == 7) ||
15404                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15405                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15406                         strcat(str, "133MHz");
15407                 else if (clock_ctrl == 0)
15408                         strcat(str, "33MHz");
15409                 else if (clock_ctrl == 2)
15410                         strcat(str, "50MHz");
15411                 else if (clock_ctrl == 4)
15412                         strcat(str, "66MHz");
15413                 else if (clock_ctrl == 6)
15414                         strcat(str, "100MHz");
15415         } else {
15416                 strcpy(str, "PCI:");
15417                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15418                         strcat(str, "66MHz");
15419                 else
15420                         strcat(str, "33MHz");
15421         }
15422         if (tg3_flag(tp, PCI_32BIT))
15423                 strcat(str, ":32-bit");
15424         else
15425                 strcat(str, ":64-bit");
15426         return str;
15427 }
15428
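/* Install the default ethtool interrupt-coalescing parameters.  The
 * CLRTICK variants apply when the host coalescing engine clears its
 * tick counters on BD events; on 5705 and newer chips the per-irq and
 * statistics-block tick values are zeroed.
 */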
15429 static void __devinit tg3_init_coal(struct tg3 *tp)
15430 {
15431         struct ethtool_coalesce *ec = &tp->coal;
15432
15433         memset(ec, 0, sizeof(*ec));
15434         ec->cmd = ETHTOOL_GCOALESCE;
15435         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15436         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15437         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15438         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15439         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15440         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15441         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15442         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15443         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15444
15445         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15446                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15447                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15448                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15449                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15450                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15451         }
15452
15453         if (tg3_flag(tp, 5705_PLUS)) {
15454                 ec->rx_coalesce_usecs_irq = 0;
15455                 ec->tx_coalesce_usecs_irq = 0;
15456                 ec->stats_block_coalesce_usecs = 0;
15457         }
15458 }
15459
15460 static int __devinit tg3_init_one(struct pci_dev *pdev,
15461                                   const struct pci_device_id *ent)
15462 {
15463         struct net_device *dev;
15464         struct tg3 *tp;
15465         int i, err, pm_cap;
15466         u32 sndmbx, rcvmbx, intmbx;
15467         char str[40];
15468         u64 dma_mask, persist_dma_mask;
15469         netdev_features_t features = 0;
15470
15471         printk_once(KERN_INFO "%s\n", version);
15472
15473         err = pci_enable_device(pdev);
15474         if (err) {
15475                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15476                 return err;
15477         }
15478
15479         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15480         if (err) {
15481                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15482                 goto err_out_disable_pdev;
15483         }
15484
15485         pci_set_master(pdev);
15486
15487         /* Find power-management capability. */
15488         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15489         if (pm_cap == 0) {
15490                 dev_err(&pdev->dev,
15491                         "Cannot find Power Management capability, aborting\n");
15492                 err = -EIO;
15493                 goto err_out_free_res;
15494         }
15495
15496         err = pci_set_power_state(pdev, PCI_D0);
15497         if (err) {
15498                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15499                 goto err_out_free_res;
15500         }
15501
15502         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15503         if (!dev) {
15504                 err = -ENOMEM;
15505                 goto err_out_power_down;
15506         }
15507
15508         SET_NETDEV_DEV(dev, &pdev->dev);
15509
15510         tp = netdev_priv(dev);
15511         tp->pdev = pdev;
15512         tp->dev = dev;
15513         tp->pm_cap = pm_cap;
15514         tp->rx_mode = TG3_DEF_RX_MODE;
15515         tp->tx_mode = TG3_DEF_TX_MODE;
15516
15517         if (tg3_debug > 0)
15518                 tp->msg_enable = tg3_debug;
15519         else
15520                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15521
15522         /* The word/byte swap controls here govern register access byte
15523          * swapping.  DMA data byte swapping is controlled by the GRC_MODE
15524          * setting below.
15525          */
15526         tp->misc_host_ctrl =
15527                 MISC_HOST_CTRL_MASK_PCI_INT |
15528                 MISC_HOST_CTRL_WORD_SWAP |
15529                 MISC_HOST_CTRL_INDIR_ACCESS |
15530                 MISC_HOST_CTRL_PCISTATE_RW;
15531
15532         /* The NONFRM (non-frame) byte/word swap controls take effect
15533          * on descriptor entries, i.e., anything that isn't packet data.
15534          *
15535          * The StrongARM chips on the board (one for tx, one for rx)
15536          * are running in big-endian mode.
15537          */
15538         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15539                         GRC_MODE_WSWAP_NONFRM_DATA);
15540 #ifdef __BIG_ENDIAN
15541         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15542 #endif
15543         spin_lock_init(&tp->lock);
15544         spin_lock_init(&tp->indirect_lock);
15545         INIT_WORK(&tp->reset_task, tg3_reset_task);
15546
15547         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15548         if (!tp->regs) {
15549                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15550                 err = -ENOMEM;
15551                 goto err_out_free_dev;
15552         }
15553
15554         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15555             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15556             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15557             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15558             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15559             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15560             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15561             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15562                 tg3_flag_set(tp, ENABLE_APE);
15563                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15564                 if (!tp->aperegs) {
15565                         dev_err(&pdev->dev,
15566                                 "Cannot map APE registers, aborting\n");
15567                         err = -ENOMEM;
15568                         goto err_out_iounmap;
15569                 }
15570         }
15571
15572         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15573         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15574
15575         dev->ethtool_ops = &tg3_ethtool_ops;
15576         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15577         dev->netdev_ops = &tg3_netdev_ops;
15578         dev->irq = pdev->irq;
15579
15580         err = tg3_get_invariants(tp);
15581         if (err) {
15582                 dev_err(&pdev->dev,
15583                         "Problem fetching invariants of chip, aborting\n");
15584                 goto err_out_apeunmap;
15585         }
15586
15587         /* The EPB bridge inside 5714, 5715, and 5780 and any
15588          * device behind the EPB cannot support DMA addresses > 40-bit.
15589          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15590          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15591          * do DMA address check in tg3_start_xmit().
15592          */
15593         if (tg3_flag(tp, IS_5788))
15594                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15595         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15596                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15597 #ifdef CONFIG_HIGHMEM
15598                 dma_mask = DMA_BIT_MASK(64);
15599 #endif
15600         } else
15601                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15602
15603         /* Configure DMA attributes. */
15604         if (dma_mask > DMA_BIT_MASK(32)) {
15605                 err = pci_set_dma_mask(pdev, dma_mask);
15606                 if (!err) {
15607                         features |= NETIF_F_HIGHDMA;
15608                         err = pci_set_consistent_dma_mask(pdev,
15609                                                           persist_dma_mask);
15610                         if (err < 0) {
15611                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15612                                         "DMA for consistent allocations\n");
15613                                 goto err_out_apeunmap;
15614                         }
15615                 }
15616         }
15617         if (err || dma_mask == DMA_BIT_MASK(32)) {
15618                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15619                 if (err) {
15620                         dev_err(&pdev->dev,
15621                                 "No usable DMA configuration, aborting\n");
15622                         goto err_out_apeunmap;
15623                 }
15624         }
15625
15626         tg3_init_bufmgr_config(tp);
15627
15628         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15629
15630         /* 5700 B0 chips do not support checksumming correctly due
15631          * to hardware bugs.
15632          */
15633         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15634                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15635
15636                 if (tg3_flag(tp, 5755_PLUS))
15637                         features |= NETIF_F_IPV6_CSUM;
15638         }
15639
15640         /* TSO is on by default on chips that support hardware TSO.
15641          * Firmware TSO on older chips gives lower performance, so it
15642          * is off by default, but can be enabled using ethtool.
15643          */
15644         if ((tg3_flag(tp, HW_TSO_1) ||
15645              tg3_flag(tp, HW_TSO_2) ||
15646              tg3_flag(tp, HW_TSO_3)) &&
15647             (features & NETIF_F_IP_CSUM))
15648                 features |= NETIF_F_TSO;
15649         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15650                 if (features & NETIF_F_IPV6_CSUM)
15651                         features |= NETIF_F_TSO6;
15652                 if (tg3_flag(tp, HW_TSO_3) ||
15653                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15654                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15655                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15656                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15657                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15658                         features |= NETIF_F_TSO_ECN;
15659         }
15660
15661         dev->features |= features;
15662         dev->vlan_features |= features;
15663
15664         /*
15665          * Add loopback capability only for a subset of devices that support
15666          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15667          * loopback for the remaining devices.
15668          */
15669         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15670             !tg3_flag(tp, CPMU_PRESENT))
15671                 /* Add the loopback capability */
15672                 features |= NETIF_F_LOOPBACK;
15673
15674         dev->hw_features |= features;
15675
15676         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15677             !tg3_flag(tp, TSO_CAPABLE) &&
15678             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15679                 tg3_flag_set(tp, MAX_RXPEND_64);
15680                 tp->rx_pending = 63;
15681         }
15682
15683         err = tg3_get_device_address(tp);
15684         if (err) {
15685                 dev_err(&pdev->dev,
15686                         "Could not obtain valid ethernet address, aborting\n");
15687                 goto err_out_apeunmap;
15688         }
15689
15690         /*
15691          * Reset chip in case UNDI or EFI driver did not shut it down.
15692          * Otherwise the DMA self test below will enable the WDMAC and
15693          * we'll see (spurious) pending DMA on the PCI bus at that point.
15694          */
15695         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15696             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15697                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15698                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15699         }
15700
15701         err = tg3_test_dma(tp);
15702         if (err) {
15703                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15704                 goto err_out_apeunmap;
15705         }
15706
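        /* Hand each NAPI context its interrupt, receive-return and send
         * mailbox registers.  Interrupt mailboxes sit 8 bytes apart for
         * the first vectors and 4 bytes apart after that, matching the
         * register layout.
         */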
15707         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15708         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15709         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15710         for (i = 0; i < tp->irq_max; i++) {
15711                 struct tg3_napi *tnapi = &tp->napi[i];
15712
15713                 tnapi->tp = tp;
15714                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15715
15716                 tnapi->int_mbox = intmbx;
15717                 if (i <= 4)
15718                         intmbx += 0x8;
15719                 else
15720                         intmbx += 0x4;
15721
15722                 tnapi->consmbox = rcvmbx;
15723                 tnapi->prodmbox = sndmbx;
15724
15725                 if (i)
15726                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15727                 else
15728                         tnapi->coal_now = HOSTCC_MODE_NOW;
15729
15730                 if (!tg3_flag(tp, SUPPORT_MSIX))
15731                         break;
15732
15733                 /*
15734                  * If we support MSIX, we'll be using RSS.  If we're using
15735                  * RSS, the first vector only handles link interrupts and the
15736                  * remaining vectors handle rx and tx interrupts.  Reuse the
15737                  * mailbox values for the next iteration.  The values we setup
15738                  * above are still useful for the single vectored mode.
15739                  */
15740                 if (!i)
15741                         continue;
15742
15743                 rcvmbx += 0x8;
15744
15745                 if (sndmbx & 0x4)
15746                         sndmbx -= 0x4;
15747                 else
15748                         sndmbx += 0xc;
15749         }
15750
15751         tg3_init_coal(tp);
15752
15753         pci_set_drvdata(pdev, dev);
15754
15755         if (tg3_flag(tp, 5717_PLUS)) {
15756                 /* Resume from a low-power mode */
15757                 tg3_frob_aux_power(tp, false);
15758         }
15759
15760         tg3_timer_init(tp);
15761
15762         err = register_netdev(dev);
15763         if (err) {
15764                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15765                 goto err_out_apeunmap;
15766         }
15767
15768         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15769                     tp->board_part_number,
15770                     tp->pci_chip_rev_id,
15771                     tg3_bus_string(tp, str),
15772                     dev->dev_addr);
15773
15774         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15775                 struct phy_device *phydev;
15776                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15777                 netdev_info(dev,
15778                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15779                             phydev->drv->name, dev_name(&phydev->dev));
15780         } else {
15781                 char *ethtype;
15782
15783                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15784                         ethtype = "10/100Base-TX";
15785                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15786                         ethtype = "1000Base-SX";
15787                 else
15788                         ethtype = "10/100/1000Base-T";
15789
15790                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15791                             "(WireSpeed[%d], EEE[%d])\n",
15792                             tg3_phy_string(tp), ethtype,
15793                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15794                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15795         }
15796
15797         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15798                     (dev->features & NETIF_F_RXCSUM) != 0,
15799                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15800                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15801                     tg3_flag(tp, ENABLE_ASF) != 0,
15802                     tg3_flag(tp, TSO_CAPABLE) != 0);
15803         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15804                     tp->dma_rwctrl,
15805                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15806                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15807
15808         pci_save_state(pdev);
15809
15810         return 0;
15811
15812 err_out_apeunmap:
15813         if (tp->aperegs) {
15814                 iounmap(tp->aperegs);
15815                 tp->aperegs = NULL;
15816         }
15817
15818 err_out_iounmap:
15819         if (tp->regs) {
15820                 iounmap(tp->regs);
15821                 tp->regs = NULL;
15822         }
15823
15824 err_out_free_dev:
15825         free_netdev(dev);
15826
15827 err_out_power_down:
15828         pci_set_power_state(pdev, PCI_D3hot);
15829
15830 err_out_free_res:
15831         pci_release_regions(pdev);
15832
15833 err_out_disable_pdev:
15834         pci_disable_device(pdev);
15835         pci_set_drvdata(pdev, NULL);
15836         return err;
15837 }
15838
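/* Undo tg3_init_one(): release firmware, cancel the reset task, detach
 * the PHY layer, unregister the netdev and unmap/release all PCI
 * resources.
 */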
15839 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15840 {
15841         struct net_device *dev = pci_get_drvdata(pdev);
15842
15843         if (dev) {
15844                 struct tg3 *tp = netdev_priv(dev);
15845
15846                 if (tp->fw)
15847                         release_firmware(tp->fw);
15848
15849                 tg3_reset_task_cancel(tp);
15850
15851                 if (tg3_flag(tp, USE_PHYLIB)) {
15852                         tg3_phy_fini(tp);
15853                         tg3_mdio_fini(tp);
15854                 }
15855
15856                 unregister_netdev(dev);
15857                 if (tp->aperegs) {
15858                         iounmap(tp->aperegs);
15859                         tp->aperegs = NULL;
15860                 }
15861                 if (tp->regs) {
15862                         iounmap(tp->regs);
15863                         tp->regs = NULL;
15864                 }
15865                 free_netdev(dev);
15866                 pci_release_regions(pdev);
15867                 pci_disable_device(pdev);
15868                 pci_set_drvdata(pdev, NULL);
15869         }
15870 }
15871
15872 #ifdef CONFIG_PM_SLEEP
15873 static int tg3_suspend(struct device *device)
15874 {
15875         struct pci_dev *pdev = to_pci_dev(device);
15876         struct net_device *dev = pci_get_drvdata(pdev);
15877         struct tg3 *tp = netdev_priv(dev);
15878         int err;
15879
15880         if (!netif_running(dev))
15881                 return 0;
15882
15883         tg3_reset_task_cancel(tp);
15884         tg3_phy_stop(tp);
15885         tg3_netif_stop(tp);
15886
15887         tg3_timer_stop(tp);
15888
15889         tg3_full_lock(tp, 1);
15890         tg3_disable_ints(tp);
15891         tg3_full_unlock(tp);
15892
15893         netif_device_detach(dev);
15894
15895         tg3_full_lock(tp, 0);
15896         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15897         tg3_flag_clear(tp, INIT_COMPLETE);
15898         tg3_full_unlock(tp);
15899
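        /* Drop the chip into its low-power state.  If that fails, bring
         * the hardware back up and reattach the interface so suspend
         * leaves the device in a usable state.
         */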
15900         err = tg3_power_down_prepare(tp);
15901         if (err) {
15902                 int err2;
15903
15904                 tg3_full_lock(tp, 0);
15905
15906                 tg3_flag_set(tp, INIT_COMPLETE);
15907                 err2 = tg3_restart_hw(tp, 1);
15908                 if (err2)
15909                         goto out;
15910
15911                 tg3_timer_start(tp);
15912
15913                 netif_device_attach(dev);
15914                 tg3_netif_start(tp);
15915
15916 out:
15917                 tg3_full_unlock(tp);
15918
15919                 if (!err2)
15920                         tg3_phy_start(tp);
15921         }
15922
15923         return err;
15924 }
15925
15926 static int tg3_resume(struct device *device)
15927 {
15928         struct pci_dev *pdev = to_pci_dev(device);
15929         struct net_device *dev = pci_get_drvdata(pdev);
15930         struct tg3 *tp = netdev_priv(dev);
15931         int err;
15932
15933         if (!netif_running(dev))
15934                 return 0;
15935
15936         netif_device_attach(dev);
15937
15938         tg3_full_lock(tp, 0);
15939
15940         tg3_flag_set(tp, INIT_COMPLETE);
15941         err = tg3_restart_hw(tp, 1);
15942         if (err)
15943                 goto out;
15944
15945         tg3_timer_start(tp);
15946
15947         tg3_netif_start(tp);
15948
15949 out:
15950         tg3_full_unlock(tp);
15951
15952         if (!err)
15953                 tg3_phy_start(tp);
15954
15955         return err;
15956 }
15957
15958 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15959 #define TG3_PM_OPS (&tg3_pm_ops)
15960
15961 #else
15962
15963 #define TG3_PM_OPS NULL
15964
15965 #endif /* CONFIG_PM_SLEEP */
15966
15967 /**
15968  * tg3_io_error_detected - called when PCI error is detected
15969  * @pdev: Pointer to PCI device
15970  * @state: The current PCI connection state
15971  *
15972  * This function is called after a PCI bus error affecting
15973  * this device has been detected.
15974  */
15975 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15976                                               pci_channel_state_t state)
15977 {
15978         struct net_device *netdev = pci_get_drvdata(pdev);
15979         struct tg3 *tp = netdev_priv(netdev);
15980         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15981
15982         netdev_info(netdev, "PCI I/O error detected\n");
15983
15984         rtnl_lock();
15985
15986         if (!netif_running(netdev))
15987                 goto done;
15988
15989         tg3_phy_stop(tp);
15990
15991         tg3_netif_stop(tp);
15992
15993         tg3_timer_stop(tp);
15994
15995         /* Want to make sure that the reset task doesn't run */
15996         tg3_reset_task_cancel(tp);
15997
15998         netif_device_detach(netdev);
15999
16000         /* Clean up software state, even if MMIO is blocked */
16001         tg3_full_lock(tp, 0);
16002         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16003         tg3_full_unlock(tp);
16004
16005 done:
16006         if (state == pci_channel_io_perm_failure)
16007                 err = PCI_ERS_RESULT_DISCONNECT;
16008         else
16009                 pci_disable_device(pdev);
16010
16011         rtnl_unlock();
16012
16013         return err;
16014 }
16015
16016 /**
16017  * tg3_io_slot_reset - called after the PCI bus has been reset.
16018  * @pdev: Pointer to PCI device
16019  *
16020  * Restart the card from scratch, as if from a cold-boot.
16021  * At this point, the card has experienced a hard reset,
16022  * followed by fixups by BIOS, and has its config space
16023  * set up identically to what it was at cold boot.
16024  */
16025 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16026 {
16027         struct net_device *netdev = pci_get_drvdata(pdev);
16028         struct tg3 *tp = netdev_priv(netdev);
16029         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16030         int err;
16031
16032         rtnl_lock();
16033
16034         if (pci_enable_device(pdev)) {
16035                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16036                 goto done;
16037         }
16038
16039         pci_set_master(pdev);
16040         pci_restore_state(pdev);
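        /* Re-save the just-restored config space so that a subsequent
         * restore starts from this known-good state.
         */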
16041         pci_save_state(pdev);
16042
16043         if (!netif_running(netdev)) {
16044                 rc = PCI_ERS_RESULT_RECOVERED;
16045                 goto done;
16046         }
16047
16048         err = tg3_power_up(tp);
16049         if (err)
16050                 goto done;
16051
16052         rc = PCI_ERS_RESULT_RECOVERED;
16053
16054 done:
16055         rtnl_unlock();
16056
16057         return rc;
16058 }
16059
16060 /**
16061  * tg3_io_resume - called when traffic can start flowing again.
16062  * @pdev: Pointer to PCI device
16063  *
16064  * This callback is called when the error recovery driver tells
16065  * us that it is OK to resume normal operation.
16066  */
16067 static void tg3_io_resume(struct pci_dev *pdev)
16068 {
16069         struct net_device *netdev = pci_get_drvdata(pdev);
16070         struct tg3 *tp = netdev_priv(netdev);
16071         int err;
16072
16073         rtnl_lock();
16074
16075         if (!netif_running(netdev))
16076                 goto done;
16077
16078         tg3_full_lock(tp, 0);
16079         tg3_flag_set(tp, INIT_COMPLETE);
16080         err = tg3_restart_hw(tp, 1);
16081         tg3_full_unlock(tp);
16082         if (err) {
16083                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16084                 goto done;
16085         }
16086
16087         netif_device_attach(netdev);
16088
16089         tg3_timer_start(tp);
16090
16091         tg3_netif_start(tp);
16092
16093         tg3_phy_start(tp);
16094
16095 done:
16096         rtnl_unlock();
16097 }
16098
16099 static struct pci_error_handlers tg3_err_handler = {
16100         .error_detected = tg3_io_error_detected,
16101         .slot_reset     = tg3_io_slot_reset,
16102         .resume         = tg3_io_resume
16103 };
16104
16105 static struct pci_driver tg3_driver = {
16106         .name           = DRV_MODULE_NAME,
16107         .id_table       = tg3_pci_tbl,
16108         .probe          = tg3_init_one,
16109         .remove         = __devexit_p(tg3_remove_one),
16110         .err_handler    = &tg3_err_handler,
16111         .driver.pm      = TG3_PM_OPS,
16112 };
16113
16114 static int __init tg3_init(void)
16115 {
16116         return pci_register_driver(&tg3_driver);
16117 }
16118
16119 static void __exit tg3_cleanup(void)
16120 {
16121         pci_unregister_driver(&tg3_driver);
16122 }
16123
16124 module_init(tg3_init);
16125 module_exit(tg3_cleanup);