/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
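
/* For example, tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), so any flag name
 * passed to these macros must name a member of enum TG3_FLAGS or the
 * build fails.
 */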

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     120
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "August 18, 2011"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
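
/* A worked example of the mask trick described above: with
 * TG3_TX_RING_SIZE fixed at 512, NEXT_TX(511) computes
 * (511 + 1) & 511 == 0, wrapping the index with a single AND rather
 * than a hardware modulo.
 */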

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
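
/* On architectures that define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 * (x86, for instance), TG3_RX_COPY_THRESH(tp) therefore folds to the
 * constant 256 and the rx hot path never dereferences
 * tp->rx_copy_thresh.
 */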

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       0
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX               4096
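
/* Assuming tx_pending is left at its default of
 * TG3_DEF_TX_RING_PENDING (511), the queue is woken once
 * 511 / 4 = 127 descriptors are free.
 */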

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
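
/* Typical usage of the variants above: tw32() for a plain posted
 * write, tw32_f() when the write must be flushed immediately, and
 * tw32_wait_f() when a settling delay is also required, e.g.
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40) in
 * tg3_switch_clocks() below.
 */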

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver is not holding any stale locks. */
        for (i = 0; i < 8; i++) {
                if (i == TG3_APE_LOCK_GPIO)
                        continue;
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
        }

        /* Clear the correct bit of the GPIO lock too. */
        if (!tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_REQ_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
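
/* tg3_ape_lock()/tg3_ape_unlock() bracket accesses to resources shared
 * with the APE firmware; tg3_ape_send_event() below, for example, takes
 * TG3_APE_LOCK_MEM around its read-modify-write of TG3_APE_EVENT_STATUS
 * and drops it again before delaying and retrying.
 */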

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
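
/* A MII transaction is performed by composing a MI_COM frame and
 * polling for completion.  Reading MII_BMSR from the PHY at address 1,
 * for instance, builds frame_val from
 * (1 << MI_COM_PHY_ADDR_SHIFT) | (MII_BMSR << MI_COM_REG_ADDR_SHIFT) |
 * MI_COM_CMD_READ | MI_COM_START, then spins until MI_COM_BUSY clears
 * or PHY_BUSY_LOOPS polls expire.
 */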

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB);
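
/* These two macros are intended to bracket DSP accesses; a caller would
 * typically do err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); followed by
 * tg3_phydsp_write() calls and TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 * (usage pattern inferred from the helpers above; the call sites are
 * outside this excerpt).
 */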

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
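
/* The firmware event handshake used below is: wait for the previous
 * GRC_RX_CPU_DRIVER_EVENT to be acked, write the command and data into
 * the NIC_SRAM_FW_CMD_*_MBOX locations, then raise the event with
 * tg3_generate_fw_event(), as tg3_ump_link_report() does.
 */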

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
        if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                /* Wait for RX cpu to ACK the previous event. */
                tg3_wait_for_event_ack(tp);

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

                tg3_generate_fw_event(tp);

                /* Wait for RX cpu to ACK this event. */
                tg3_wait_for_event_ack(tp);
        }
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
        tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_INIT ||
            kind == RESET_KIND_SUSPEND)
                tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START_DONE);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD_DONE);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_SHUTDOWN)
                tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1575 {
1576         if (tg3_flag(tp, ENABLE_ASF)) {
1577                 switch (kind) {
1578                 case RESET_KIND_INIT:
1579                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1580                                       DRV_STATE_START);
1581                         break;
1582
1583                 case RESET_KIND_SHUTDOWN:
1584                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1585                                       DRV_STATE_UNLOAD);
1586                         break;
1587
1588                 case RESET_KIND_SUSPEND:
1589                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1590                                       DRV_STATE_SUSPEND);
1591                         break;
1592
1593                 default:
1594                         break;
1595                 }
1596         }
1597 }
1598
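/* Wait for the bootcode to finish.  tg3_write_sig_pre_reset() leaves
 * MAGIC1 in the firmware mailbox; the bootcode writes back the one's
 * complement (~MAGIC1) once its initialization is complete.
 */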
1599 static int tg3_poll_fw(struct tg3 *tp)
1600 {
1601         int i;
1602         u32 val;
1603
1604         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1605                 /* Wait up to 20ms for init done. */
1606                 for (i = 0; i < 200; i++) {
1607                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1608                                 return 0;
1609                         udelay(100);
1610                 }
1611                 return -ENODEV;
1612         }
1613
1614         /* Wait for firmware initialization to complete. */
1615         for (i = 0; i < 100000; i++) {
1616                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1617                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1618                         break;
1619                 udelay(10);
1620         }
1621
1622         /* Chip might not be fitted with firmware.  Some Sun onboard
1623          * parts are configured like that.  So don't signal the timeout
1624          * of the above loop as an error, but do report the lack of
1625          * running firmware once.
1626          */
1627         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1628                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1629
1630                 netdev_info(tp->dev, "No firmware running\n");
1631         }
1632
1633         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1634                 /* The 57765 A0 needs a little more
1635                  * time to do some important work.
1636                  */
1637                 mdelay(10);
1638         }
1639
1640         return 0;
1641 }
1642
1643 static void tg3_link_report(struct tg3 *tp)
1644 {
1645         if (!netif_carrier_ok(tp->dev)) {
1646                 netif_info(tp, link, tp->dev, "Link is down\n");
1647                 tg3_ump_link_report(tp);
1648         } else if (netif_msg_link(tp)) {
1649                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1650                             (tp->link_config.active_speed == SPEED_1000 ?
1651                              1000 :
1652                              (tp->link_config.active_speed == SPEED_100 ?
1653                               100 : 10)),
1654                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1655                              "full" : "half"));
1656
1657                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1658                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1659                             "on" : "off",
1660                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1661                             "on" : "off");
1662
1663                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1664                         netdev_info(tp->dev, "EEE is %s\n",
1665                                     tp->setlpicnt ? "enabled" : "disabled");
1666
1667                 tg3_ump_link_report(tp);
1668         }
1669 }
1670
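/* Map the wanted FLOW_CTRL_{TX,RX} combination onto 1000BASE-T pause
 * advertisement bits (IEEE 802.3 Annex 28B): TX+RX -> PAUSE,
 * TX only -> ASYM only, RX only -> PAUSE | ASYM.
 */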
1671 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1672 {
1673         u16 miireg;
1674
1675         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1676                 miireg = ADVERTISE_PAUSE_CAP;
1677         else if (flow_ctrl & FLOW_CTRL_TX)
1678                 miireg = ADVERTISE_PAUSE_ASYM;
1679         else if (flow_ctrl & FLOW_CTRL_RX)
1680                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1681         else
1682                 miireg = 0;
1683
1684         return miireg;
1685 }
1686
1687 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1688 {
1689         u16 miireg;
1690
1691         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1692                 miireg = ADVERTISE_1000XPAUSE;
1693         else if (flow_ctrl & FLOW_CTRL_TX)
1694                 miireg = ADVERTISE_1000XPSE_ASYM;
1695         else if (flow_ctrl & FLOW_CTRL_RX)
1696                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1697         else
1698                 miireg = 0;
1699
1700         return miireg;
1701 }
1702
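/* Resolve the negotiated 1000BASE-X pause configuration, following
 * the IEEE 802.3 Annex 28B.3 pause resolution table:
 *   local PAUSE        + remote PAUSE            -> TX | RX
 *   local PAUSE | ASYM + remote ASYM (no PAUSE)  -> RX only
 *   local ASYM only    + remote PAUSE and ASYM   -> TX only
 */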
1703 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1704 {
1705         u8 cap = 0;
1706
1707         if (lcladv & ADVERTISE_1000XPAUSE) {
1708                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1709                         if (rmtadv & LPA_1000XPAUSE)
1710                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1711                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1712                                 cap = FLOW_CTRL_RX;
1713                 } else {
1714                         if (rmtadv & LPA_1000XPAUSE)
1715                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1716                 }
1717         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1718                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1719                         cap = FLOW_CTRL_TX;
1720         }
1721
1722         return cap;
1723 }
1724
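/* Program the MAC's RX/TX flow-control enables from either the
 * autonegotiated pause result or the forced configuration, flushing
 * MAC_RX_MODE/MAC_TX_MODE only when the value actually changes.
 */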
1725 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1726 {
1727         u8 autoneg;
1728         u8 flowctrl = 0;
1729         u32 old_rx_mode = tp->rx_mode;
1730         u32 old_tx_mode = tp->tx_mode;
1731
1732         if (tg3_flag(tp, USE_PHYLIB))
1733                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1734         else
1735                 autoneg = tp->link_config.autoneg;
1736
1737         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1738                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1739                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1740                 else
1741                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1742         } else
1743                 flowctrl = tp->link_config.flowctrl;
1744
1745         tp->link_config.active_flowctrl = flowctrl;
1746
1747         if (flowctrl & FLOW_CTRL_RX)
1748                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1749         else
1750                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1751
1752         if (old_rx_mode != tp->rx_mode)
1753                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1754
1755         if (flowctrl & FLOW_CTRL_TX)
1756                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1757         else
1758                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1759
1760         if (old_tx_mode != tp->tx_mode)
1761                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1762 }
1763
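/* phylib link-change callback, registered through phy_connect() in
 * tg3_phy_init().  Mirrors the PHY's speed/duplex/pause state into
 * MAC_MODE, the MI status and TX length registers under tp->lock.
 */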
1764 static void tg3_adjust_link(struct net_device *dev)
1765 {
1766         u8 oldflowctrl, linkmesg = 0;
1767         u32 mac_mode, lcl_adv, rmt_adv;
1768         struct tg3 *tp = netdev_priv(dev);
1769         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1770
1771         spin_lock_bh(&tp->lock);
1772
1773         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1774                                     MAC_MODE_HALF_DUPLEX);
1775
1776         oldflowctrl = tp->link_config.active_flowctrl;
1777
1778         if (phydev->link) {
1779                 lcl_adv = 0;
1780                 rmt_adv = 0;
1781
1782                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1783                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1784                 else if (phydev->speed == SPEED_1000 ||
1785                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1786                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1787                 else
1788                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1789
1790                 if (phydev->duplex == DUPLEX_HALF)
1791                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1792                 else {
1793                         lcl_adv = tg3_advert_flowctrl_1000T(
1794                                   tp->link_config.flowctrl);
1795
1796                         if (phydev->pause)
1797                                 rmt_adv = LPA_PAUSE_CAP;
1798                         if (phydev->asym_pause)
1799                                 rmt_adv |= LPA_PAUSE_ASYM;
1800                 }
1801
1802                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1803         } else
1804                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1805
1806         if (mac_mode != tp->mac_mode) {
1807                 tp->mac_mode = mac_mode;
1808                 tw32_f(MAC_MODE, tp->mac_mode);
1809                 udelay(40);
1810         }
1811
1812         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1813                 if (phydev->speed == SPEED_10)
1814                         tw32(MAC_MI_STAT,
1815                              MAC_MI_STAT_10MBPS_MODE |
1816                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1817                 else
1818                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1819         }
1820
1821         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1822                 tw32(MAC_TX_LENGTHS,
1823                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1824                       (6 << TX_LENGTHS_IPG_SHIFT) |
1825                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1826         else
1827                 tw32(MAC_TX_LENGTHS,
1828                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1829                       (6 << TX_LENGTHS_IPG_SHIFT) |
1830                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1831
1832         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1833             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1834             phydev->speed != tp->link_config.active_speed ||
1835             phydev->duplex != tp->link_config.active_duplex ||
1836             oldflowctrl != tp->link_config.active_flowctrl)
1837                 linkmesg = 1;
1838
1839         tp->link_config.active_speed = phydev->speed;
1840         tp->link_config.active_duplex = phydev->duplex;
1841
1842         spin_unlock_bh(&tp->lock);
1843
1844         if (linkmesg)
1845                 tg3_link_report(tp);
1846 }
1847
1848 static int tg3_phy_init(struct tg3 *tp)
1849 {
1850         struct phy_device *phydev;
1851
1852         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1853                 return 0;
1854
1855         /* Bring the PHY back to a known state. */
1856         tg3_bmcr_reset(tp);
1857
1858         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1859
1860         /* Attach the MAC to the PHY. */
1861         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1862                              phydev->dev_flags, phydev->interface);
1863         if (IS_ERR(phydev)) {
1864                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1865                 return PTR_ERR(phydev);
1866         }
1867
1868         /* Mask with MAC supported features. */
1869         switch (phydev->interface) {
1870         case PHY_INTERFACE_MODE_GMII:
1871         case PHY_INTERFACE_MODE_RGMII:
1872                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1873                         phydev->supported &= (PHY_GBIT_FEATURES |
1874                                               SUPPORTED_Pause |
1875                                               SUPPORTED_Asym_Pause);
1876                         break;
1877                 }
1878                 /* fallthru */
1879         case PHY_INTERFACE_MODE_MII:
1880                 phydev->supported &= (PHY_BASIC_FEATURES |
1881                                       SUPPORTED_Pause |
1882                                       SUPPORTED_Asym_Pause);
1883                 break;
1884         default:
1885                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1886                 return -EINVAL;
1887         }
1888
1889         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1890
1891         phydev->advertising = phydev->supported;
1892
1893         return 0;
1894 }
1895
1896 static void tg3_phy_start(struct tg3 *tp)
1897 {
1898         struct phy_device *phydev;
1899
1900         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901                 return;
1902
1903         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1904
1905         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1906                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1907                 phydev->speed = tp->link_config.orig_speed;
1908                 phydev->duplex = tp->link_config.orig_duplex;
1909                 phydev->autoneg = tp->link_config.orig_autoneg;
1910                 phydev->advertising = tp->link_config.orig_advertising;
1911         }
1912
1913         phy_start(phydev);
1914
1915         phy_start_aneg(phydev);
1916 }
1917
1918 static void tg3_phy_stop(struct tg3 *tp)
1919 {
1920         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1921                 return;
1922
1923         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1924 }
1925
1926 static void tg3_phy_fini(struct tg3 *tp)
1927 {
1928         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1929                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1930                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1931         }
1932 }
1933
1934 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1935 {
1936         int err;
1937         u32 val;
1938
1939         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1940                 return 0;
1941
1942         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1943                 /* Cannot do read-modify-write on 5401 */
1944                 err = tg3_phy_auxctl_write(tp,
1945                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1946                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1947                                            0x4c20);
1948                 goto done;
1949         }
1950
1951         err = tg3_phy_auxctl_read(tp,
1952                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1953         if (err)
1954                 return err;
1955
1956         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1957         err = tg3_phy_auxctl_write(tp,
1958                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1959
1960 done:
1961         return err;
1962 }
1963
1964 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1965 {
1966         u32 phytest;
1967
1968         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1969                 u32 phy;
1970
1971                 tg3_writephy(tp, MII_TG3_FET_TEST,
1972                              phytest | MII_TG3_FET_SHADOW_EN);
1973                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1974                         if (enable)
1975                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1976                         else
1977                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1978                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1979                 }
1980                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1981         }
1982 }
1983
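/* Toggle PHY auto power-down (APD).  FET-style PHYs are handled via
 * shadow registers in tg3_phy_fet_toggle_apd(); other PHYs program
 * the MISC shadow SCR5 and APD selectors (84 ms wakeup timer).
 */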
1984 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1985 {
1986         u32 reg;
1987
1988         if (!tg3_flag(tp, 5705_PLUS) ||
1989             (tg3_flag(tp, 5717_PLUS) &&
1990              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1991                 return;
1992
1993         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1994                 tg3_phy_fet_toggle_apd(tp, enable);
1995                 return;
1996         }
1997
1998         reg = MII_TG3_MISC_SHDW_WREN |
1999               MII_TG3_MISC_SHDW_SCR5_SEL |
2000               MII_TG3_MISC_SHDW_SCR5_LPED |
2001               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2002               MII_TG3_MISC_SHDW_SCR5_SDTL |
2003               MII_TG3_MISC_SHDW_SCR5_C125OE;
2004         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2005                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2006
2007         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2008
2010         reg = MII_TG3_MISC_SHDW_WREN |
2011               MII_TG3_MISC_SHDW_APD_SEL |
2012               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2013         if (enable)
2014                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2015
2016         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2017 }
2018
2019 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2020 {
2021         u32 phy;
2022
2023         if (!tg3_flag(tp, 5705_PLUS) ||
2024             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2025                 return;
2026
2027         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2028                 u32 ephy;
2029
2030                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2031                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2032
2033                         tg3_writephy(tp, MII_TG3_FET_TEST,
2034                                      ephy | MII_TG3_FET_SHADOW_EN);
2035                         if (!tg3_readphy(tp, reg, &phy)) {
2036                                 if (enable)
2037                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2038                                 else
2039                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2040                                 tg3_writephy(tp, reg, phy);
2041                         }
2042                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2043                 }
2044         } else {
2045                 int ret;
2046
2047                 ret = tg3_phy_auxctl_read(tp,
2048                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2049                 if (!ret) {
2050                         if (enable)
2051                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2052                         else
2053                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2054                         tg3_phy_auxctl_write(tp,
2055                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2056                 }
2057         }
2058 }
2059
2060 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2061 {
2062         int ret;
2063         u32 val;
2064
2065         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2066                 return;
2067
2068         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2069         if (!ret)
2070                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2071                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2072 }
2073
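/* Unpack the factory OTP (one-time-programmable) trim values into
 * the PHY DSP: each TG3_OTP_*_MASK field is shifted into place and
 * written to its MII_TG3_DSP_* tap while the SMDSP is enabled.
 */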
2074 static void tg3_phy_apply_otp(struct tg3 *tp)
2075 {
2076         u32 otp, phy;
2077
2078         if (!tp->phy_otp)
2079                 return;
2080
2081         otp = tp->phy_otp;
2082
2083         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2084                 return;
2085
2086         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2087         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2088         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2089
2090         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2091               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2092         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2093
2094         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2095         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2096         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2097
2098         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2099         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2100
2101         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2102         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2103
2104         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2105               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2106         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2107
2108         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2109 }
2110
2111 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2112 {
2113         u32 val;
2114
2115         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2116                 return;
2117
2118         tp->setlpicnt = 0;
2119
2120         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2121             current_link_up == 1 &&
2122             tp->link_config.active_duplex == DUPLEX_FULL &&
2123             (tp->link_config.active_speed == SPEED_100 ||
2124              tp->link_config.active_speed == SPEED_1000)) {
2125                 u32 eeectl;
2126
2127                 if (tp->link_config.active_speed == SPEED_1000)
2128                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2129                 else
2130                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2131
2132                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2133
2134                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2135                                   TG3_CL45_D7_EEERES_STAT, &val);
2136
2137                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2138                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2139                         tp->setlpicnt = 2;
2140         }
2141
2142         if (!tp->setlpicnt) {
2143                 if (current_link_up == 1 &&
2144                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2145                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2146                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2147                 }
2148
2149                 val = tr32(TG3_CPMU_EEE_MODE);
2150                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2151         }
2152 }
2153
2154 static void tg3_phy_eee_enable(struct tg3 *tp)
2155 {
2156         u32 val;
2157
2158         if (tp->link_config.active_speed == SPEED_1000 &&
2159             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2160              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2161              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2162             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2163                 val = MII_TG3_DSP_TAP26_ALNOKO |
2164                       MII_TG3_DSP_TAP26_RMRXSTO;
2165                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2166                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2167         }
2168
2169         val = tr32(TG3_CPMU_EEE_MODE);
2170         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2171 }
2172
2173 static int tg3_wait_macro_done(struct tg3 *tp)
2174 {
2175         int limit = 100;
2176
2177         while (limit--) {
2178                 u32 tmp32;
2179
2180                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2181                         if ((tmp32 & 0x1000) == 0)
2182                                 break;
2183                 }
2184         }
2185         if (limit < 0)
2186                 return -EBUSY;
2187
2188         return 0;
2189 }
2190
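/* Helper for tg3_phy_reset_5703_4_5(): write a 6-word test pattern
 * into each of the four DSP channels at (chan * 0x2000) | 0x0200,
 * read it back, and request another PHY reset via *resetp on any
 * mismatch or macro timeout.
 */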
2191 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2192 {
2193         static const u32 test_pat[4][6] = {
2194         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2195         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2196         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2197         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2198         };
2199         int chan;
2200
2201         for (chan = 0; chan < 4; chan++) {
2202                 int i;
2203
2204                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2205                              (chan * 0x2000) | 0x0200);
2206                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2207
2208                 for (i = 0; i < 6; i++)
2209                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2210                                      test_pat[chan][i]);
2211
2212                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2213                 if (tg3_wait_macro_done(tp)) {
2214                         *resetp = 1;
2215                         return -EBUSY;
2216                 }
2217
2218                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2219                              (chan * 0x2000) | 0x0200);
2220                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2221                 if (tg3_wait_macro_done(tp)) {
2222                         *resetp = 1;
2223                         return -EBUSY;
2224                 }
2225
2226                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2227                 if (tg3_wait_macro_done(tp)) {
2228                         *resetp = 1;
2229                         return -EBUSY;
2230                 }
2231
2232                 for (i = 0; i < 6; i += 2) {
2233                         u32 low, high;
2234
2235                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2236                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2237                             tg3_wait_macro_done(tp)) {
2238                                 *resetp = 1;
2239                                 return -EBUSY;
2240                         }
2241                         low &= 0x7fff;
2242                         high &= 0x000f;
2243                         if (low != test_pat[chan][i] ||
2244                             high != test_pat[chan][i+1]) {
2245                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2246                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2247                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2248
2249                                 return -EBUSY;
2250                         }
2251                 }
2252         }
2253
2254         return 0;
2255 }
2256
2257 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2258 {
2259         int chan;
2260
2261         for (chan = 0; chan < 4; chan++) {
2262                 int i;
2263
2264                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2265                              (chan * 0x2000) | 0x0200);
2266                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2267                 for (i = 0; i < 6; i++)
2268                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2269                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2270                 if (tg3_wait_macro_done(tp))
2271                         return -EBUSY;
2272         }
2273
2274         return 0;
2275 }
2276
2277 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2278 {
2279         u32 reg32, phy9_orig;
2280         int retries, do_phy_reset, err;
2281
2282         retries = 10;
2283         do_phy_reset = 1;
2284         do {
2285                 if (do_phy_reset) {
2286                         err = tg3_bmcr_reset(tp);
2287                         if (err)
2288                                 return err;
2289                         do_phy_reset = 0;
2290                 }
2291
2292                 /* Disable transmitter and interrupt.  */
2293                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2294                         continue;
2295
2296                 reg32 |= 0x3000;
2297                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2298
2299                 /* Set full-duplex, 1000 mbps.  */
2300                 tg3_writephy(tp, MII_BMCR,
2301                              BMCR_FULLDPLX | BMCR_SPEED1000);
2302
2303                 /* Set to master mode.  */
2304                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2305                         continue;
2306
2307                 tg3_writephy(tp, MII_CTRL1000,
2308                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2309
2310                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2311                 if (err)
2312                         return err;
2313
2314                 /* Block the PHY control access.  */
2315                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2316
2317                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2318                 if (!err)
2319                         break;
2320         } while (--retries);
2321
2322         err = tg3_phy_reset_chanpat(tp);
2323         if (err)
2324                 return err;
2325
2326         tg3_phydsp_write(tp, 0x8005, 0x0000);
2327
2328         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2329         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2330
2331         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2332
2333         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2334
2335         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2336                 reg32 &= ~0x3000;
2337                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2338         } else if (!err)
2339                 err = -EBUSY;
2340
2341         return err;
2342 }
2343
2344 /* Unconditionally reset the tigon3 PHY and apply the chip-specific
2345  * post-reset workarounds.
2346  */
2347 static int tg3_phy_reset(struct tg3 *tp)
2348 {
2349         u32 val, cpmuctrl;
2350         int err;
2351
2352         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2353                 val = tr32(GRC_MISC_CFG);
2354                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2355                 udelay(40);
2356         }
2357         err  = tg3_readphy(tp, MII_BMSR, &val);
2358         err |= tg3_readphy(tp, MII_BMSR, &val);
2359         if (err != 0)
2360                 return -EBUSY;
2361
2362         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2363                 netif_carrier_off(tp->dev);
2364                 tg3_link_report(tp);
2365         }
2366
2367         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2368             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2369             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2370                 err = tg3_phy_reset_5703_4_5(tp);
2371                 if (err)
2372                         return err;
2373                 goto out;
2374         }
2375
2376         cpmuctrl = 0;
2377         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2378             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2379                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2380                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2381                         tw32(TG3_CPMU_CTRL,
2382                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2383         }
2384
2385         err = tg3_bmcr_reset(tp);
2386         if (err)
2387                 return err;
2388
2389         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2390                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2391                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2392
2393                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2394         }
2395
2396         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2397             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2398                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2399                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2400                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2401                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2402                         udelay(40);
2403                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2404                 }
2405         }
2406
2407         if (tg3_flag(tp, 5717_PLUS) &&
2408             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2409                 return 0;
2410
2411         tg3_phy_apply_otp(tp);
2412
2413         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2414                 tg3_phy_toggle_apd(tp, true);
2415         else
2416                 tg3_phy_toggle_apd(tp, false);
2417
2418 out:
2419         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2420             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2421                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2422                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2423                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2424         }
2425
2426         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2427                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2428                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2429         }
2430
2431         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2432                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2433                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2434                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2435                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2436                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2437                 }
2438         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2439                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2440                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2441                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2442                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2443                                 tg3_writephy(tp, MII_TG3_TEST1,
2444                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2445                         } else
2446                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2447
2448                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2449                 }
2450         }
2451
2452         /* Set the extended packet length bit (bit 14) on all chips
2453          * that support jumbo frames. */
2454         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2455                 /* Cannot do read-modify-write on 5401 */
2456                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2457         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2458                 /* Set bit 14 with read-modify-write to preserve other bits */
2459                 err = tg3_phy_auxctl_read(tp,
2460                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2461                 if (!err)
2462                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2463                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2464         }
2465
2466         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2467          * jumbo frames transmission.
2468          */
2469         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2470                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2471                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2472                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2473         }
2474
2475         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2476                 /* adjust output voltage */
2477                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2478         }
2479
2480         tg3_phy_toggle_automdix(tp, 1);
2481         tg3_phy_set_wirespeed(tp);
2482         return 0;
2483 }
2484
2485 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2486 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2487 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2488                                           TG3_GPIO_MSG_NEED_VAUX)
2489 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2490         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2491          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2492          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2493          (TG3_GPIO_MSG_DRVR_PRES << 12))
2494
2495 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2496         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2497          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2498          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2499          (TG3_GPIO_MSG_NEED_VAUX << 12))
2500
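/* Each of the four PCI functions owns a 4-bit slot in the GPIO
 * message word (hence the << 0/4/8/12 replication above);
 * tg3_set_function_status() rewrites only this function's slot.
 */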
2501 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2502 {
2503         u32 status, shift;
2504
2505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2508         else
2509                 status = tr32(TG3_CPMU_DRV_STATUS);
2510
2511         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2512         status &= ~(TG3_GPIO_MSG_MASK << shift);
2513         status |= (newstat << shift);
2514
2515         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2516             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2517                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2518         else
2519                 tw32(TG3_CPMU_DRV_STATUS, status);
2520
2521         return status >> TG3_APE_GPIO_MSG_SHIFT;
2522 }
2523
2524 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2525 {
2526         if (!tg3_flag(tp, IS_NIC))
2527                 return 0;
2528
2529         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2530             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2531             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2532                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2533                         return -EIO;
2534
2535                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2536
2537                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2538                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2539
2540                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2541         } else {
2542                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2543                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2544         }
2545
2546         return 0;
2547 }
2548
2549 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2550 {
2551         u32 grc_local_ctrl;
2552
2553         if (!tg3_flag(tp, IS_NIC) ||
2554             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2555             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2556                 return;
2557
2558         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2559
2560         tw32_wait_f(GRC_LOCAL_CTRL,
2561                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2562                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2563
2564         tw32_wait_f(GRC_LOCAL_CTRL,
2565                     grc_local_ctrl,
2566                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2567
2568         tw32_wait_f(GRC_LOCAL_CTRL,
2569                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2570                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2571 }
2572
2573 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2574 {
2575         if (!tg3_flag(tp, IS_NIC))
2576                 return;
2577
2578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2579             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2580                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2581                             (GRC_LCLCTRL_GPIO_OE0 |
2582                              GRC_LCLCTRL_GPIO_OE1 |
2583                              GRC_LCLCTRL_GPIO_OE2 |
2584                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2585                              GRC_LCLCTRL_GPIO_OUTPUT1),
2586                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2587         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2588                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2589                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2590                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2591                                      GRC_LCLCTRL_GPIO_OE1 |
2592                                      GRC_LCLCTRL_GPIO_OE2 |
2593                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2594                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2595                                      tp->grc_local_ctrl;
2596                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2597                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2598
2599                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2600                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2601                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2602
2603                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2604                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2605                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2606         } else {
2607                 u32 no_gpio2;
2608                 u32 grc_local_ctrl = 0;
2609
2610                 /* Workaround to avoid drawing too much supply current. */
2611                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2612                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2613                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2614                                     grc_local_ctrl,
2615                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2616                 }
2617
2618                 /* On 5753 and variants, GPIO2 cannot be used. */
2619                 no_gpio2 = tp->nic_sram_data_cfg &
2620                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2621
2622                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2623                                   GRC_LCLCTRL_GPIO_OE1 |
2624                                   GRC_LCLCTRL_GPIO_OE2 |
2625                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2626                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2627                 if (no_gpio2) {
2628                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2629                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2630                 }
2631                 tw32_wait_f(GRC_LOCAL_CTRL,
2632                             tp->grc_local_ctrl | grc_local_ctrl,
2633                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2634
2635                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2636
2637                 tw32_wait_f(GRC_LOCAL_CTRL,
2638                             tp->grc_local_ctrl | grc_local_ctrl,
2639                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2640
2641                 if (!no_gpio2) {
2642                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2643                         tw32_wait_f(GRC_LOCAL_CTRL,
2644                                     tp->grc_local_ctrl | grc_local_ctrl,
2645                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2646                 }
2647         }
2648 }
2649
2650 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2651 {
2652         u32 msg = 0;
2653
2654         /* Serialize power state transitions */
2655         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2656                 return;
2657
2658         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2659                 msg = TG3_GPIO_MSG_NEED_VAUX;
2660
2661         msg = tg3_set_function_status(tp, msg);
2662
2663         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2664                 goto done;
2665
2666         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2667                 tg3_pwrsrc_switch_to_vaux(tp);
2668         else
2669                 tg3_pwrsrc_die_with_vmain(tp);
2670
2671 done:
2672         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2673 }
2674
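/* Decide between auxiliary (Vaux) and main (Vmain) power.  ASF (or,
 * on 5717-class chips, APE) firmware and WoL on either port of a
 * dual-port device need Vaux to survive; otherwise the part can die
 * with Vmain.
 */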
2675 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2676 {
2677         bool need_vaux = false;
2678
2679         /* The GPIOs do something completely different on 57765. */
2680         if (!tg3_flag(tp, IS_NIC) ||
2681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2682                 return;
2683
2684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2687                 tg3_frob_aux_power_5717(tp, include_wol ?
2688                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2689                 return;
2690         }
2691
2692         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2693                 struct net_device *dev_peer;
2694
2695                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2696
2697                 /* remove_one() may have been run on the peer. */
2698                 if (dev_peer) {
2699                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2700
2701                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2702                                 return;
2703
2704                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2705                             tg3_flag(tp_peer, ENABLE_ASF))
2706                                 need_vaux = true;
2707                 }
2708         }
2709
2710         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2711             tg3_flag(tp, ENABLE_ASF))
2712                 need_vaux = true;
2713
2714         if (need_vaux)
2715                 tg3_pwrsrc_switch_to_vaux(tp);
2716         else
2717                 tg3_pwrsrc_die_with_vmain(tp);
2718 }
2719
2720 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2721 {
2722         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2723                 return 1;
2724         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2725                 if (speed != SPEED_10)
2726                         return 1;
2727         } else if (speed == SPEED_10)
2728                 return 1;
2729
2730         return 0;
2731 }
2732
2733 static int tg3_setup_phy(struct tg3 *, int);
2734 static int tg3_halt_cpu(struct tg3 *, u32);
2735
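/* Drop the PHY into its lowest-power state, honoring per-chip
 * quirks: 5704 SerDes parts park the SG_DIG block, the 5906 uses
 * EPHY IDDQ, FET PHYs use shadow-register standby power-down, and
 * chips with PHY power-down bugs are left powered up.
 */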
2736 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2737 {
2738         u32 val;
2739
2740         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2741                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2742                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2743                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2744
2745                         sg_dig_ctrl |=
2746                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2747                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2748                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2749                 }
2750                 return;
2751         }
2752
2753         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2754                 tg3_bmcr_reset(tp);
2755                 val = tr32(GRC_MISC_CFG);
2756                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2757                 udelay(40);
2758                 return;
2759         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2760                 u32 phytest;
2761                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2762                         u32 phy;
2763
2764                         tg3_writephy(tp, MII_ADVERTISE, 0);
2765                         tg3_writephy(tp, MII_BMCR,
2766                                      BMCR_ANENABLE | BMCR_ANRESTART);
2767
2768                         tg3_writephy(tp, MII_TG3_FET_TEST,
2769                                      phytest | MII_TG3_FET_SHADOW_EN);
2770                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2771                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2772                                 tg3_writephy(tp,
2773                                              MII_TG3_FET_SHDW_AUXMODE4,
2774                                              phy);
2775                         }
2776                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2777                 }
2778                 return;
2779         } else if (do_low_power) {
2780                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2781                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2782
2783                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2784                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2785                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2786                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2787         }
2788
2789         /* The PHY should not be powered down on some chips because
2790          * of bugs.
2791          */
2792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2793             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2794             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2795              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2796                 return;
2797
2798         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2799             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2800                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2801                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2802                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2803                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2804         }
2805
2806         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2807 }
2808
2809 /* tp->lock is held. */
2810 static int tg3_nvram_lock(struct tg3 *tp)
2811 {
2812         if (tg3_flag(tp, NVRAM)) {
2813                 int i;
2814
2815                 if (tp->nvram_lock_cnt == 0) {
2816                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2817                         for (i = 0; i < 8000; i++) {
2818                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2819                                         break;
2820                                 udelay(20);
2821                         }
2822                         if (i == 8000) {
2823                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2824                                 return -ENODEV;
2825                         }
2826                 }
2827                 tp->nvram_lock_cnt++;
2828         }
2829         return 0;
2830 }
2831
2832 /* tp->lock is held. */
2833 static void tg3_nvram_unlock(struct tg3 *tp)
2834 {
2835         if (tg3_flag(tp, NVRAM)) {
2836                 if (tp->nvram_lock_cnt > 0)
2837                         tp->nvram_lock_cnt--;
2838                 if (tp->nvram_lock_cnt == 0)
2839                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2840         }
2841 }
2842
2843 /* tp->lock is held. */
2844 static void tg3_enable_nvram_access(struct tg3 *tp)
2845 {
2846         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2847                 u32 nvaccess = tr32(NVRAM_ACCESS);
2848
2849                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2850         }
2851 }
2852
2853 /* tp->lock is held. */
2854 static void tg3_disable_nvram_access(struct tg3 *tp)
2855 {
2856         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2857                 u32 nvaccess = tr32(NVRAM_ACCESS);
2858
2859                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2860         }
2861 }
2862
2863 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2864                                         u32 offset, u32 *val)
2865 {
2866         u32 tmp;
2867         int i;
2868
2869         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2870                 return -EINVAL;
2871
2872         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2873                                         EEPROM_ADDR_DEVID_MASK |
2874                                         EEPROM_ADDR_READ);
2875         tw32(GRC_EEPROM_ADDR,
2876              tmp |
2877              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2878              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2879               EEPROM_ADDR_ADDR_MASK) |
2880              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2881
2882         for (i = 0; i < 1000; i++) {
2883                 tmp = tr32(GRC_EEPROM_ADDR);
2884
2885                 if (tmp & EEPROM_ADDR_COMPLETE)
2886                         break;
2887                 msleep(1);
2888         }
2889         if (!(tmp & EEPROM_ADDR_COMPLETE))
2890                 return -EBUSY;
2891
2892         tmp = tr32(GRC_EEPROM_DATA);
2893
2894         /*
2895          * The data will always be opposite the native endian
2896          * format.  Perform a blind byteswap to compensate.
2897          */
2898         *val = swab32(tmp);
2899
2900         return 0;
2901 }
2902
2903 #define NVRAM_CMD_TIMEOUT 10000
2904
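/* Each poll iteration below udelays 10 usec, so NVRAM_CMD_TIMEOUT
 * bounds a single NVRAM command at roughly 100 ms before -EBUSY.
 */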
2905 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2906 {
2907         int i;
2908
2909         tw32(NVRAM_CMD, nvram_cmd);
2910         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2911                 udelay(10);
2912                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2913                         udelay(10);
2914                         break;
2915                 }
2916         }
2917
2918         if (i == NVRAM_CMD_TIMEOUT)
2919                 return -EBUSY;
2920
2921         return 0;
2922 }
2923
2924 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2925 {
2926         if (tg3_flag(tp, NVRAM) &&
2927             tg3_flag(tp, NVRAM_BUFFERED) &&
2928             tg3_flag(tp, FLASH) &&
2929             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2930             (tp->nvram_jedecnum == JEDEC_ATMEL))
2931
2932                 addr = ((addr / tp->nvram_pagesize) <<
2933                         ATMEL_AT45DB0X1B_PAGE_POS) +
2934                        (addr % tp->nvram_pagesize);
2935
2936         return addr;
2937 }
2938
2939 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2940 {
2941         if (tg3_flag(tp, NVRAM) &&
2942             tg3_flag(tp, NVRAM_BUFFERED) &&
2943             tg3_flag(tp, FLASH) &&
2944             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2945             (tp->nvram_jedecnum == JEDEC_ATMEL))
2946
2947                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2948                         tp->nvram_pagesize) +
2949                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2950
2951         return addr;
2952 }
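
/* Worked example for the two translations above (illustrative values,
 * assuming a 264-byte-per-page AT45DB011B-style part, i.e.
 * tp->nvram_pagesize == 264 and ATMEL_AT45DB0X1B_PAGE_POS == 9):
 *
 *	logical addr 1000 = page 3, byte 208
 *	physical addr     = (3 << 9) + 208 = 0x6d0
 *
 * tg3_nvram_logical_addr() performs the exact inverse mapping.
 */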
2953
2954 /* NOTE: Data read in from NVRAM is byteswapped according to
2955  * the byteswapping settings for all other register accesses.
2956  * tg3 devices are BE devices, so on a BE machine, the data
2957  * returned will be exactly as it is seen in NVRAM.  On a LE
2958  * machine, the 32-bit value will be byteswapped.
2959  */
2960 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2961 {
2962         int ret;
2963
2964         if (!tg3_flag(tp, NVRAM))
2965                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2966
2967         offset = tg3_nvram_phys_addr(tp, offset);
2968
2969         if (offset > NVRAM_ADDR_MSK)
2970                 return -EINVAL;
2971
2972         ret = tg3_nvram_lock(tp);
2973         if (ret)
2974                 return ret;
2975
2976         tg3_enable_nvram_access(tp);
2977
2978         tw32(NVRAM_ADDR, offset);
2979         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2980                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2981
2982         if (ret == 0)
2983                 *val = tr32(NVRAM_RDDATA);
2984
2985         tg3_disable_nvram_access(tp);
2986
2987         tg3_nvram_unlock(tp);
2988
2989         return ret;
2990 }
2991
2992 /* Ensures NVRAM data is in bytestream format. */
2993 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2994 {
2995         u32 v;
2996         int res = tg3_nvram_read(tp, offset, &v);
2997         if (!res)
2998                 *val = cpu_to_be32(v);
2999         return res;
3000 }
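
/* Endianness example for the two readers above: for the NVRAM byte
 * stream aa bb cc dd, tg3_nvram_read() returns the numeric value
 * 0xaabbccdd on both big- and little-endian hosts (so the in-memory
 * byte order differs between them), while tg3_nvram_read_be32()
 * always stores the bytes aa bb cc dd in memory order, which is what
 * callers that parse NVRAM contents byte-for-byte want.
 */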
3001
3002 #define RX_CPU_SCRATCH_BASE     0x30000
3003 #define RX_CPU_SCRATCH_SIZE     0x04000
3004 #define TX_CPU_SCRATCH_BASE     0x34000
3005 #define TX_CPU_SCRATCH_SIZE     0x04000
3006
3007 /* tp->lock is held. */
3008 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3009 {
3010         int i;
3011
3012         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3013
3014         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3015                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3016
3017                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3018                 return 0;
3019         }
3020         if (offset == RX_CPU_BASE) {
3021                 for (i = 0; i < 10000; i++) {
3022                         tw32(offset + CPU_STATE, 0xffffffff);
3023                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3024                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3025                                 break;
3026                 }
3027
3028                 tw32(offset + CPU_STATE, 0xffffffff);
3029                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3030                 udelay(10);
3031         } else {
3032                 for (i = 0; i < 10000; i++) {
3033                         tw32(offset + CPU_STATE, 0xffffffff);
3034                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3035                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3036                                 break;
3037                 }
3038         }
3039
3040         if (i >= 10000) {
3041                 netdev_err(tp->dev, "%s: timed out halting the %s CPU\n",
3042                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3043                 return -ENODEV;
3044         }
3045
3046         /* Clear firmware's nvram arbitration. */
3047         if (tg3_flag(tp, NVRAM))
3048                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3049         return 0;
3050 }
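
/* Note on the 5906 special case above: that chip's internal VCPU is
 * halted through GRC_VCPU_EXT_CTRL rather than by writing CPU_MODE,
 * which is why the function returns early without polling.
 */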
3051
3052 struct fw_info {
3053         unsigned int fw_base;
3054         unsigned int fw_len;
3055         const __be32 *fw_data;
3056 };
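
/* The firmware images handed back by request_firmware() begin with
 * three big-endian words (field names here are illustrative; the
 * loaders below simply index fw->data):
 *
 *	__be32 version;    word 0
 *	__be32 base;       word 1: address the image is linked to run at
 *	__be32 length;     word 2: end_address_of_bss - start_address_of_text
 *	__be32 image[];    word 3 onward, copied contiguously to 'base'
 */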
3057
3058 /* tp->lock is held. */
3059 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3060                                  u32 cpu_scratch_base, int cpu_scratch_size,
3061                                  struct fw_info *info)
3062 {
3063         int err, lock_err, i;
3064         void (*write_op)(struct tg3 *, u32, u32);
3065
3066         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3067                 netdev_err(tp->dev,
3068                            "%s: trying to load TX cpu firmware on a 5705-plus chip, which has no TX cpu\n",
3069                            __func__);
3070                 return -EINVAL;
3071         }
3072
3073         if (tg3_flag(tp, 5705_PLUS))
3074                 write_op = tg3_write_mem;
3075         else
3076                 write_op = tg3_write_indirect_reg32;
3077
3078         /* The bootcode may still be loading from NVRAM at this point,
3079          * so acquire the nvram lock before halting the cpu.
3080          */
3081         lock_err = tg3_nvram_lock(tp);
3082         err = tg3_halt_cpu(tp, cpu_base);
3083         if (!lock_err)
3084                 tg3_nvram_unlock(tp);
3085         if (err)
3086                 goto out;
3087
3088         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3089                 write_op(tp, cpu_scratch_base + i, 0);
3090         tw32(cpu_base + CPU_STATE, 0xffffffff);
3091         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3092         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3093                 write_op(tp, (cpu_scratch_base +
3094                               (info->fw_base & 0xffff) +
3095                               (i * sizeof(u32))),
3096                               be32_to_cpu(info->fw_data[i]));
3097
3098         err = 0;
3099
3100 out:
3101         return err;
3102 }
3103
3104 /* tp->lock is held. */
3105 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3106 {
3107         struct fw_info info;
3108         const __be32 *fw_data;
3109         int err, i;
3110
3111         fw_data = (void *)tp->fw->data;
3112
3113         /* The firmware blob starts with version numbers, followed by
3114          * the start address and length.  We use the complete length:
3115          * length = end_address_of_bss - start_address_of_text.  The
3116          * remainder is the blob, loaded contiguously from the start address.
3117          */
3118
3119         info.fw_base = be32_to_cpu(fw_data[1]);
3120         info.fw_len = tp->fw->size - 12;
3121         info.fw_data = &fw_data[3];
3122
3123         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3124                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3125                                     &info);
3126         if (err)
3127                 return err;
3128
3129         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3130                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3131                                     &info);
3132         if (err)
3133                 return err;
3134
3135         /* Now start up only the RX cpu. */
3136         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3137         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3138
3139         for (i = 0; i < 5; i++) {
3140                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3141                         break;
3142                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3143                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3144                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3145                 udelay(1000);
3146         }
3147         if (i >= 5) {
3148                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3149                            "should be %08x\n", __func__,
3150                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3151                 return -ENODEV;
3152         }
3153         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3154         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3155
3156         return 0;
3157 }
3158
3159 /* tp->lock is held. */
3160 static int tg3_load_tso_firmware(struct tg3 *tp)
3161 {
3162         struct fw_info info;
3163         const __be32 *fw_data;
3164         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3165         int err, i;
3166
3167         if (tg3_flag(tp, HW_TSO_1) ||
3168             tg3_flag(tp, HW_TSO_2) ||
3169             tg3_flag(tp, HW_TSO_3))
3170                 return 0;
3171
3172         fw_data = (void *)tp->fw->data;
3173
3174         /* The firmware blob starts with version numbers, followed by
3175          * the start address and length.  We use the complete length:
3176          * length = end_address_of_bss - start_address_of_text.  The
3177          * remainder is the blob, loaded contiguously from the start address.
3178          */
3179
3180         info.fw_base = be32_to_cpu(fw_data[1]);
3181         cpu_scratch_size = tp->fw_len;
3182         info.fw_len = tp->fw->size - 12;
3183         info.fw_data = &fw_data[3];
3184
3185         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3186                 cpu_base = RX_CPU_BASE;
3187                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3188         } else {
3189                 cpu_base = TX_CPU_BASE;
3190                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3191                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3192         }
3193
3194         err = tg3_load_firmware_cpu(tp, cpu_base,
3195                                     cpu_scratch_base, cpu_scratch_size,
3196                                     &info);
3197         if (err)
3198                 return err;
3199
3200         /* Now start up the cpu. */
3201         tw32(cpu_base + CPU_STATE, 0xffffffff);
3202         tw32_f(cpu_base + CPU_PC, info.fw_base);
3203
3204         for (i = 0; i < 5; i++) {
3205                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3206                         break;
3207                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3208                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3209                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3210                 udelay(1000);
3211         }
3212         if (i >= 5) {
3213                 netdev_err(tp->dev,
3214                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3215                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3216                 return -ENODEV;
3217         }
3218         tw32(cpu_base + CPU_STATE, 0xffffffff);
3219         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3220         return 0;
3221 }
3222
3223
3224 /* tp->lock is held. */
3225 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3226 {
3227         u32 addr_high, addr_low;
3228         int i;
3229
3230         addr_high = ((tp->dev->dev_addr[0] << 8) |
3231                      tp->dev->dev_addr[1]);
3232         addr_low = ((tp->dev->dev_addr[2] << 24) |
3233                     (tp->dev->dev_addr[3] << 16) |
3234                     (tp->dev->dev_addr[4] <<  8) |
3235                     (tp->dev->dev_addr[5] <<  0));
3236         for (i = 0; i < 4; i++) {
3237                 if (i == 1 && skip_mac_1)
3238                         continue;
3239                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3240                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3241         }
3242
3243         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3244             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3245                 for (i = 0; i < 12; i++) {
3246                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3247                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3248                 }
3249         }
3250
3251         addr_high = (tp->dev->dev_addr[0] +
3252                      tp->dev->dev_addr[1] +
3253                      tp->dev->dev_addr[2] +
3254                      tp->dev->dev_addr[3] +
3255                      tp->dev->dev_addr[4] +
3256                      tp->dev->dev_addr[5]) &
3257                 TX_BACKOFF_SEED_MASK;
3258         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3259 }
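
/* Illustrative register layout for the function above: for the
 * (hypothetical) station address 00:10:18:aa:bb:cc,
 *
 *	addr_high = 0x00000010  (address bytes 0-1)
 *	addr_low  = 0x18aabbcc  (address bytes 2-5)
 *
 * written to all four MAC_ADDR_{0..3} slots (plus twelve more copies
 * on 5703/5704), while the TX backoff seed is simply the sum of the
 * six address bytes masked with TX_BACKOFF_SEED_MASK.
 */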
3260
3261 static void tg3_enable_register_access(struct tg3 *tp)
3262 {
3263         /*
3264          * Make sure register accesses (indirect or otherwise) will function
3265          * correctly.
3266          */
3267         pci_write_config_dword(tp->pdev,
3268                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3269 }
3270
3271 static int tg3_power_up(struct tg3 *tp)
3272 {
3273         int err;
3274
3275         tg3_enable_register_access(tp);
3276
3277         err = pci_set_power_state(tp->pdev, PCI_D0);
3278         if (!err) {
3279                 /* Switch out of Vaux if it is a NIC */
3280                 tg3_pwrsrc_switch_to_vmain(tp);
3281         } else {
3282                 netdev_err(tp->dev, "Transition to D0 failed\n");
3283         }
3284
3285         return err;
3286 }
3287
3288 static int tg3_power_down_prepare(struct tg3 *tp)
3289 {
3290         u32 misc_host_ctrl;
3291         bool device_should_wake, do_low_power;
3292
3293         tg3_enable_register_access(tp);
3294
3295         /* Restore the CLKREQ setting. */
3296         if (tg3_flag(tp, CLKREQ_BUG)) {
3297                 u16 lnkctl;
3298
3299                 pci_read_config_word(tp->pdev,
3300                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3301                                      &lnkctl);
3302                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3303                 pci_write_config_word(tp->pdev,
3304                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3305                                       lnkctl);
3306         }
3307
3308         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3309         tw32(TG3PCI_MISC_HOST_CTRL,
3310              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3311
3312         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3313                              tg3_flag(tp, WOL_ENABLE);
3314
3315         if (tg3_flag(tp, USE_PHYLIB)) {
3316                 do_low_power = false;
3317                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3318                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3319                         struct phy_device *phydev;
3320                         u32 phyid, advertising;
3321
3322                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3323
3324                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3325
3326                         tp->link_config.orig_speed = phydev->speed;
3327                         tp->link_config.orig_duplex = phydev->duplex;
3328                         tp->link_config.orig_autoneg = phydev->autoneg;
3329                         tp->link_config.orig_advertising = phydev->advertising;
3330
3331                         advertising = ADVERTISED_TP |
3332                                       ADVERTISED_Pause |
3333                                       ADVERTISED_Autoneg |
3334                                       ADVERTISED_10baseT_Half;
3335
3336                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3337                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3338                                         advertising |=
3339                                                 ADVERTISED_100baseT_Half |
3340                                                 ADVERTISED_100baseT_Full |
3341                                                 ADVERTISED_10baseT_Full;
3342                                 else
3343                                         advertising |= ADVERTISED_10baseT_Full;
3344                         }
3345
3346                         phydev->advertising = advertising;
3347
3348                         phy_start_aneg(phydev);
3349
3350                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3351                         if (phyid != PHY_ID_BCMAC131) {
3352                                 phyid &= PHY_BCM_OUI_MASK;
3353                                 if (phyid == PHY_BCM_OUI_1 ||
3354                                     phyid == PHY_BCM_OUI_2 ||
3355                                     phyid == PHY_BCM_OUI_3)
3356                                         do_low_power = true;
3357                         }
3358                 }
3359         } else {
3360                 do_low_power = true;
3361
3362                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3363                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3364                         tp->link_config.orig_speed = tp->link_config.speed;
3365                         tp->link_config.orig_duplex = tp->link_config.duplex;
3366                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
3367                 }
3368
3369                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3370                         tp->link_config.speed = SPEED_10;
3371                         tp->link_config.duplex = DUPLEX_HALF;
3372                         tp->link_config.autoneg = AUTONEG_ENABLE;
3373                         tg3_setup_phy(tp, 0);
3374                 }
3375         }
3376
3377         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3378                 u32 val;
3379
3380                 val = tr32(GRC_VCPU_EXT_CTRL);
3381                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3382         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3383                 int i;
3384                 u32 val;
3385
3386                 for (i = 0; i < 200; i++) {
3387                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3388                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3389                                 break;
3390                         msleep(1);
3391                 }
3392         }
3393         if (tg3_flag(tp, WOL_CAP))
3394                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3395                                                      WOL_DRV_STATE_SHUTDOWN |
3396                                                      WOL_DRV_WOL |
3397                                                      WOL_SET_MAGIC_PKT);
3398
3399         if (device_should_wake) {
3400                 u32 mac_mode;
3401
3402                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3403                         if (do_low_power &&
3404                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3405                                 tg3_phy_auxctl_write(tp,
3406                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3407                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3408                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3409                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3410                                 udelay(40);
3411                         }
3412
3413                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3414                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3415                         else
3416                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3417
3418                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3419                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3420                             ASIC_REV_5700) {
3421                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3422                                              SPEED_100 : SPEED_10;
3423                                 if (tg3_5700_link_polarity(tp, speed))
3424                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3425                                 else
3426                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3427                         }
3428                 } else {
3429                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3430                 }
3431
3432                 if (!tg3_flag(tp, 5750_PLUS))
3433                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3434
3435                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3436                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3437                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3438                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3439
3440                 if (tg3_flag(tp, ENABLE_APE))
3441                         mac_mode |= MAC_MODE_APE_TX_EN |
3442                                     MAC_MODE_APE_RX_EN |
3443                                     MAC_MODE_TDE_ENABLE;
3444
3445                 tw32_f(MAC_MODE, mac_mode);
3446                 udelay(100);
3447
3448                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3449                 udelay(10);
3450         }
3451
3452         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3453             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3454              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3455                 u32 base_val;
3456
3457                 base_val = tp->pci_clock_ctrl;
3458                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3459                              CLOCK_CTRL_TXCLK_DISABLE);
3460
3461                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3462                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3463         } else if (tg3_flag(tp, 5780_CLASS) ||
3464                    tg3_flag(tp, CPMU_PRESENT) ||
3465                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3466                 /* do nothing */
3467         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3468                 u32 newbits1, newbits2;
3469
3470                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3471                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3472                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3473                                     CLOCK_CTRL_TXCLK_DISABLE |
3474                                     CLOCK_CTRL_ALTCLK);
3475                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3476                 } else if (tg3_flag(tp, 5705_PLUS)) {
3477                         newbits1 = CLOCK_CTRL_625_CORE;
3478                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3479                 } else {
3480                         newbits1 = CLOCK_CTRL_ALTCLK;
3481                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3482                 }
3483
3484                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3485                             40);
3486
3487                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3488                             40);
3489
3490                 if (!tg3_flag(tp, 5705_PLUS)) {
3491                         u32 newbits3;
3492
3493                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3494                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3495                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3496                                             CLOCK_CTRL_TXCLK_DISABLE |
3497                                             CLOCK_CTRL_44MHZ_CORE);
3498                         } else {
3499                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3500                         }
3501
3502                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3503                                     tp->pci_clock_ctrl | newbits3, 40);
3504                 }
3505         }
3506
3507         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3508                 tg3_power_down_phy(tp, do_low_power);
3509
3510         tg3_frob_aux_power(tp, true);
3511
3512         /* Workaround for unstable PLL clock */
3513         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3514             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3515                 u32 val = tr32(0x7d00);
3516
3517                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3518                 tw32(0x7d00, val);
3519                 if (!tg3_flag(tp, ENABLE_ASF)) {
3520                         int err;
3521
3522                         err = tg3_nvram_lock(tp);
3523                         tg3_halt_cpu(tp, RX_CPU_BASE);
3524                         if (!err)
3525                                 tg3_nvram_unlock(tp);
3526                 }
3527         }
3528
3529         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3530
3531         return 0;
3532 }
3533
3534 static void tg3_power_down(struct tg3 *tp)
3535 {
3536         tg3_power_down_prepare(tp);
3537
3538         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3539         pci_set_power_state(tp->pdev, PCI_D3hot);
3540 }
3541
3542 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3543 {
3544         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3545         case MII_TG3_AUX_STAT_10HALF:
3546                 *speed = SPEED_10;
3547                 *duplex = DUPLEX_HALF;
3548                 break;
3549
3550         case MII_TG3_AUX_STAT_10FULL:
3551                 *speed = SPEED_10;
3552                 *duplex = DUPLEX_FULL;
3553                 break;
3554
3555         case MII_TG3_AUX_STAT_100HALF:
3556                 *speed = SPEED_100;
3557                 *duplex = DUPLEX_HALF;
3558                 break;
3559
3560         case MII_TG3_AUX_STAT_100FULL:
3561                 *speed = SPEED_100;
3562                 *duplex = DUPLEX_FULL;
3563                 break;
3564
3565         case MII_TG3_AUX_STAT_1000HALF:
3566                 *speed = SPEED_1000;
3567                 *duplex = DUPLEX_HALF;
3568                 break;
3569
3570         case MII_TG3_AUX_STAT_1000FULL:
3571                 *speed = SPEED_1000;
3572                 *duplex = DUPLEX_FULL;
3573                 break;
3574
3575         default:
3576                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3577                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3578                                  SPEED_10;
3579                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3580                                   DUPLEX_HALF;
3581                         break;
3582                 }
3583                 *speed = SPEED_INVALID;
3584                 *duplex = DUPLEX_INVALID;
3585                 break;
3586         }
3587 }
3588
3589 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3590 {
3591         int err = 0;
3592         u32 val, new_adv;
3593
3594         new_adv = ADVERTISE_CSMA;
3595         if (advertise & ADVERTISED_10baseT_Half)
3596                 new_adv |= ADVERTISE_10HALF;
3597         if (advertise & ADVERTISED_10baseT_Full)
3598                 new_adv |= ADVERTISE_10FULL;
3599         if (advertise & ADVERTISED_100baseT_Half)
3600                 new_adv |= ADVERTISE_100HALF;
3601         if (advertise & ADVERTISED_100baseT_Full)
3602                 new_adv |= ADVERTISE_100FULL;
3603
3604         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3605
3606         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3607         if (err)
3608                 goto done;
3609
3610         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3611                 goto done;
3612
3613         new_adv = 0;
3614         if (advertise & ADVERTISED_1000baseT_Half)
3615                 new_adv |= ADVERTISE_1000HALF;
3616         if (advertise & ADVERTISED_1000baseT_Full)
3617                 new_adv |= ADVERTISE_1000FULL;
3618
3619         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3620             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3621                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3622
3623         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3624         if (err)
3625                 goto done;
3626
3627         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3628                 goto done;
3629
3630         tw32(TG3_CPMU_EEE_MODE,
3631              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3632
3633         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3634         if (!err) {
3635                 int err2;
3636
3637                 val = 0;
3638                 /* Advertise 100-BaseTX EEE ability */
3639                 if (advertise & ADVERTISED_100baseT_Full)
3640                         val |= MDIO_AN_EEE_ADV_100TX;
3641                 /* Advertise 1000-BaseT EEE ability */
3642                 if (advertise & ADVERTISED_1000baseT_Full)
3643                         val |= MDIO_AN_EEE_ADV_1000T;
3644                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3645                 if (err)
3646                         val = 0;
3647
3648                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3649                 case ASIC_REV_5717:
3650                 case ASIC_REV_57765:
3651                 case ASIC_REV_5719:
3652                         /* If we advertised any EEE modes above... */
3653                         if (val)
3654                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3655                                       MII_TG3_DSP_TAP26_RMRXSTO |
3656                                       MII_TG3_DSP_TAP26_OPCSINPT;
3657                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3658                         /* Fall through */
3659                 case ASIC_REV_5720:
3660                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3661                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3662                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3663                 }
3664
3665                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3666                 if (!err)
3667                         err = err2;
3668         }
3669
3670 done:
3671         return err;
3672 }
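
/* Example of the ethtool-to-MII mapping performed above, with
 * hypothetical inputs: advertise = ADVERTISED_10baseT_Full |
 * ADVERTISED_100baseT_Full and flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX
 * yield MII_ADVERTISE = ADVERTISE_CSMA | ADVERTISE_10FULL |
 * ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP, plus the 1000BASE-T bits in
 * MII_CTRL1000 when the PHY is not 10/100-only.
 */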
3673
3674 static void tg3_phy_copper_begin(struct tg3 *tp)
3675 {
3676         u32 new_adv;
3677         int i;
3678
3679         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3680                 new_adv = ADVERTISED_10baseT_Half |
3681                           ADVERTISED_10baseT_Full;
3682                 if (tg3_flag(tp, WOL_SPEED_100MB))
3683                         new_adv |= ADVERTISED_100baseT_Half |
3684                                    ADVERTISED_100baseT_Full;
3685
3686                 tg3_phy_autoneg_cfg(tp, new_adv,
3687                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3688         } else if (tp->link_config.speed == SPEED_INVALID) {
3689                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3690                         tp->link_config.advertising &=
3691                                 ~(ADVERTISED_1000baseT_Half |
3692                                   ADVERTISED_1000baseT_Full);
3693
3694                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3695                                     tp->link_config.flowctrl);
3696         } else {
3697                 /* Asking for a specific link mode. */
3698                 if (tp->link_config.speed == SPEED_1000) {
3699                         if (tp->link_config.duplex == DUPLEX_FULL)
3700                                 new_adv = ADVERTISED_1000baseT_Full;
3701                         else
3702                                 new_adv = ADVERTISED_1000baseT_Half;
3703                 } else if (tp->link_config.speed == SPEED_100) {
3704                         if (tp->link_config.duplex == DUPLEX_FULL)
3705                                 new_adv = ADVERTISED_100baseT_Full;
3706                         else
3707                                 new_adv = ADVERTISED_100baseT_Half;
3708                 } else {
3709                         if (tp->link_config.duplex == DUPLEX_FULL)
3710                                 new_adv = ADVERTISED_10baseT_Full;
3711                         else
3712                                 new_adv = ADVERTISED_10baseT_Half;
3713                 }
3714
3715                 tg3_phy_autoneg_cfg(tp, new_adv,
3716                                     tp->link_config.flowctrl);
3717         }
3718
3719         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3720             tp->link_config.speed != SPEED_INVALID) {
3721                 u32 bmcr, orig_bmcr;
3722
3723                 tp->link_config.active_speed = tp->link_config.speed;
3724                 tp->link_config.active_duplex = tp->link_config.duplex;
3725
3726                 bmcr = 0;
3727                 switch (tp->link_config.speed) {
3728                 default:
3729                 case SPEED_10:
3730                         break;
3731
3732                 case SPEED_100:
3733                         bmcr |= BMCR_SPEED100;
3734                         break;
3735
3736                 case SPEED_1000:
3737                         bmcr |= BMCR_SPEED1000;
3738                         break;
3739                 }
3740
3741                 if (tp->link_config.duplex == DUPLEX_FULL)
3742                         bmcr |= BMCR_FULLDPLX;
3743
3744                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3745                     (bmcr != orig_bmcr)) {
3746                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3747                         for (i = 0; i < 1500; i++) {
3748                                 u32 tmp;
3749
3750                                 udelay(10);
3751                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3752                                     tg3_readphy(tp, MII_BMSR, &tmp))
3753                                         continue;
3754                                 if (!(tmp & BMSR_LSTATUS)) {
3755                                         udelay(40);
3756                                         break;
3757                                 }
3758                         }
3759                         tg3_writephy(tp, MII_BMCR, bmcr);
3760                         udelay(40);
3761                 }
3762         } else {
3763                 tg3_writephy(tp, MII_BMCR,
3764                              BMCR_ANENABLE | BMCR_ANRESTART);
3765         }
3766 }
3767
3768 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3769 {
3770         int err;
3771
3772         /* Turn off tap power management and set the
3773          * extended packet length bit. */
3774         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3775
3776         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3777         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3778         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3779         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3780         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3781
3782         udelay(40);
3783
3784         return err;
3785 }
3786
3787 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3788 {
3789         u32 adv_reg, all_mask = 0;
3790
3791         if (mask & ADVERTISED_10baseT_Half)
3792                 all_mask |= ADVERTISE_10HALF;
3793         if (mask & ADVERTISED_10baseT_Full)
3794                 all_mask |= ADVERTISE_10FULL;
3795         if (mask & ADVERTISED_100baseT_Half)
3796                 all_mask |= ADVERTISE_100HALF;
3797         if (mask & ADVERTISED_100baseT_Full)
3798                 all_mask |= ADVERTISE_100FULL;
3799
3800         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3801                 return 0;
3802
3803         if ((adv_reg & ADVERTISE_ALL) != all_mask)
3804                 return 0;
3805
3806         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3807                 u32 tg3_ctrl;
3808
3809                 all_mask = 0;
3810                 if (mask & ADVERTISED_1000baseT_Half)
3811                         all_mask |= ADVERTISE_1000HALF;
3812                 if (mask & ADVERTISED_1000baseT_Full)
3813                         all_mask |= ADVERTISE_1000FULL;
3814
3815                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3816                         return 0;
3817
3818                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3819                 if (tg3_ctrl != all_mask)
3820                         return 0;
3821         }
3822
3823         return 1;
3824 }
3825
3826 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3827 {
3828         u32 curadv, reqadv;
3829
3830         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3831                 return 1;
3832
3833         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3834         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3835
3836         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3837                 if (curadv != reqadv)
3838                         return 0;
3839
3840                 if (tg3_flag(tp, PAUSE_AUTONEG))
3841                         tg3_readphy(tp, MII_LPA, rmtadv);
3842         } else {
3843                 /* Reprogram the advertisement register, even if it
3844                  * does not affect the current link.  If the link
3845                  * gets renegotiated in the future, we can save an
3846                  * additional renegotiation cycle by advertising
3847                  * it correctly in the first place.
3848                  */
3849                 if (curadv != reqadv) {
3850                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3851                                      ADVERTISE_PAUSE_ASYM);
3852                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3853                 }
3854         }
3855
3856         return 1;
3857 }
3858
3859 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3860 {
3861         int current_link_up;
3862         u32 bmsr, val;
3863         u32 lcl_adv, rmt_adv;
3864         u16 current_speed;
3865         u8 current_duplex;
3866         int i, err;
3867
3868         tw32(MAC_EVENT, 0);
3869
3870         tw32_f(MAC_STATUS,
3871              (MAC_STATUS_SYNC_CHANGED |
3872               MAC_STATUS_CFG_CHANGED |
3873               MAC_STATUS_MI_COMPLETION |
3874               MAC_STATUS_LNKSTATE_CHANGED));
3875         udelay(40);
3876
3877         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3878                 tw32_f(MAC_MI_MODE,
3879                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3880                 udelay(80);
3881         }
3882
3883         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3884
3885         /* Some third-party PHYs need to be reset on link going
3886          * down.
3887          */
3888         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3889              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3890              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3891             netif_carrier_ok(tp->dev)) {
3892                 tg3_readphy(tp, MII_BMSR, &bmsr);
3893                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3894                     !(bmsr & BMSR_LSTATUS))
3895                         force_reset = 1;
3896         }
3897         if (force_reset)
3898                 tg3_phy_reset(tp);
3899
3900         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3901                 tg3_readphy(tp, MII_BMSR, &bmsr);
3902                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3903                     !tg3_flag(tp, INIT_COMPLETE))
3904                         bmsr = 0;
3905
3906                 if (!(bmsr & BMSR_LSTATUS)) {
3907                         err = tg3_init_5401phy_dsp(tp);
3908                         if (err)
3909                                 return err;
3910
3911                         tg3_readphy(tp, MII_BMSR, &bmsr);
3912                         for (i = 0; i < 1000; i++) {
3913                                 udelay(10);
3914                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3915                                     (bmsr & BMSR_LSTATUS)) {
3916                                         udelay(40);
3917                                         break;
3918                                 }
3919                         }
3920
3921                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3922                             TG3_PHY_REV_BCM5401_B0 &&
3923                             !(bmsr & BMSR_LSTATUS) &&
3924                             tp->link_config.active_speed == SPEED_1000) {
3925                                 err = tg3_phy_reset(tp);
3926                                 if (!err)
3927                                         err = tg3_init_5401phy_dsp(tp);
3928                                 if (err)
3929                                         return err;
3930                         }
3931                 }
3932         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3933                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3934                 /* 5701 {A0,B0} CRC bug workaround */
3935                 tg3_writephy(tp, 0x15, 0x0a75);
3936                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3937                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3938                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3939         }
3940
3941         /* Clear pending interrupts... */
3942         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3943         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3944
3945         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3946                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3947         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3948                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3949
3950         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3951             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3952                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3953                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3954                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3955                 else
3956                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3957         }
3958
3959         current_link_up = 0;
3960         current_speed = SPEED_INVALID;
3961         current_duplex = DUPLEX_INVALID;
3962
3963         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3964                 err = tg3_phy_auxctl_read(tp,
3965                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3966                                           &val);
3967                 if (!err && !(val & (1 << 10))) {
3968                         tg3_phy_auxctl_write(tp,
3969                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3970                                              val | (1 << 10));
3971                         goto relink;
3972                 }
3973         }
3974
3975         bmsr = 0;
3976         for (i = 0; i < 100; i++) {
3977                 tg3_readphy(tp, MII_BMSR, &bmsr);
3978                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3979                     (bmsr & BMSR_LSTATUS))
3980                         break;
3981                 udelay(40);
3982         }
3983
3984         if (bmsr & BMSR_LSTATUS) {
3985                 u32 aux_stat, bmcr;
3986
3987                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3988                 for (i = 0; i < 2000; i++) {
3989                         udelay(10);
3990                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3991                             aux_stat)
3992                                 break;
3993                 }
3994
3995                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3996                                              &current_speed,
3997                                              &current_duplex);
3998
3999                 bmcr = 0;
4000                 for (i = 0; i < 200; i++) {
4001                         tg3_readphy(tp, MII_BMCR, &bmcr);
4002                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4003                                 continue;
4004                         if (bmcr && bmcr != 0x7fff)
4005                                 break;
4006                         udelay(10);
4007                 }
4008
4009                 lcl_adv = 0;
4010                 rmt_adv = 0;
4011
4012                 tp->link_config.active_speed = current_speed;
4013                 tp->link_config.active_duplex = current_duplex;
4014
4015                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4016                         if ((bmcr & BMCR_ANENABLE) &&
4017                             tg3_copper_is_advertising_all(tp,
4018                                                 tp->link_config.advertising)) {
4019                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4020                                                                   &rmt_adv))
4021                                         current_link_up = 1;
4022                         }
4023                 } else {
4024                         if (!(bmcr & BMCR_ANENABLE) &&
4025                             tp->link_config.speed == current_speed &&
4026                             tp->link_config.duplex == current_duplex &&
4027                             tp->link_config.flowctrl ==
4028                             tp->link_config.active_flowctrl) {
4029                                 current_link_up = 1;
4030                         }
4031                 }
4032
4033                 if (current_link_up == 1 &&
4034                     tp->link_config.active_duplex == DUPLEX_FULL)
4035                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4036         }
4037
4038 relink:
4039         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4040                 tg3_phy_copper_begin(tp);
4041
4042                 tg3_readphy(tp, MII_BMSR, &bmsr);
4043                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4044                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4045                         current_link_up = 1;
4046         }
4047
4048         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4049         if (current_link_up == 1) {
4050                 if (tp->link_config.active_speed == SPEED_100 ||
4051                     tp->link_config.active_speed == SPEED_10)
4052                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4053                 else
4054                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4055         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4056                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4057         else
4058                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4059
4060         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4061         if (tp->link_config.active_duplex == DUPLEX_HALF)
4062                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4063
4064         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4065                 if (current_link_up == 1 &&
4066                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4067                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4068                 else
4069                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4070         }
4071
4072         /* Without the auto-polling enabled below, the Netgear GA302T
4073          * PHY does not send/receive packets; the root cause is unknown.
4074          */
4075         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4076             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4077                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4078                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4079                 udelay(80);
4080         }
4081
4082         tw32_f(MAC_MODE, tp->mac_mode);
4083         udelay(40);
4084
4085         tg3_phy_eee_adjust(tp, current_link_up);
4086
4087         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4088                 /* Polled via timer. */
4089                 tw32_f(MAC_EVENT, 0);
4090         } else {
4091                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4092         }
4093         udelay(40);
4094
4095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4096             current_link_up == 1 &&
4097             tp->link_config.active_speed == SPEED_1000 &&
4098             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4099                 udelay(120);
4100                 tw32_f(MAC_STATUS,
4101                      (MAC_STATUS_SYNC_CHANGED |
4102                       MAC_STATUS_CFG_CHANGED));
4103                 udelay(40);
4104                 tg3_write_mem(tp,
4105                               NIC_SRAM_FIRMWARE_MBOX,
4106                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4107         }
4108
4109         /* Prevent send BD corruption. */
4110         if (tg3_flag(tp, CLKREQ_BUG)) {
4111                 u16 oldlnkctl, newlnkctl;
4112
4113                 pci_read_config_word(tp->pdev,
4114                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4115                                      &oldlnkctl);
4116                 if (tp->link_config.active_speed == SPEED_100 ||
4117                     tp->link_config.active_speed == SPEED_10)
4118                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4119                 else
4120                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4121                 if (newlnkctl != oldlnkctl)
4122                         pci_write_config_word(tp->pdev,
4123                                               pci_pcie_cap(tp->pdev) +
4124                                               PCI_EXP_LNKCTL, newlnkctl);
4125         }
4126
4127         if (current_link_up != netif_carrier_ok(tp->dev)) {
4128                 if (current_link_up)
4129                         netif_carrier_on(tp->dev);
4130                 else
4131                         netif_carrier_off(tp->dev);
4132                 tg3_link_report(tp);
4133         }
4134
4135         return 0;
4136 }
4137
4138 struct tg3_fiber_aneginfo {
4139         int state;
4140 #define ANEG_STATE_UNKNOWN              0
4141 #define ANEG_STATE_AN_ENABLE            1
4142 #define ANEG_STATE_RESTART_INIT         2
4143 #define ANEG_STATE_RESTART              3
4144 #define ANEG_STATE_DISABLE_LINK_OK      4
4145 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4146 #define ANEG_STATE_ABILITY_DETECT       6
4147 #define ANEG_STATE_ACK_DETECT_INIT      7
4148 #define ANEG_STATE_ACK_DETECT           8
4149 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4150 #define ANEG_STATE_COMPLETE_ACK         10
4151 #define ANEG_STATE_IDLE_DETECT_INIT     11
4152 #define ANEG_STATE_IDLE_DETECT          12
4153 #define ANEG_STATE_LINK_OK              13
4154 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4155 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4156
4157         u32 flags;
4158 #define MR_AN_ENABLE            0x00000001
4159 #define MR_RESTART_AN           0x00000002
4160 #define MR_AN_COMPLETE          0x00000004
4161 #define MR_PAGE_RX              0x00000008
4162 #define MR_NP_LOADED            0x00000010
4163 #define MR_TOGGLE_TX            0x00000020
4164 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4165 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4166 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4167 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4168 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4169 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4170 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4171 #define MR_TOGGLE_RX            0x00002000
4172 #define MR_NP_RX                0x00004000
4173
4174 #define MR_LINK_OK              0x80000000
4175
4176         unsigned long link_time, cur_time;
4177
4178         u32 ability_match_cfg;
4179         int ability_match_count;
4180
4181         char ability_match, idle_match, ack_match;
4182
4183         u32 txconfig, rxconfig;
4184 #define ANEG_CFG_NP             0x00000080
4185 #define ANEG_CFG_ACK            0x00000040
4186 #define ANEG_CFG_RF2            0x00000020
4187 #define ANEG_CFG_RF1            0x00000010
4188 #define ANEG_CFG_PS2            0x00000001
4189 #define ANEG_CFG_PS1            0x00008000
4190 #define ANEG_CFG_HD             0x00004000
4191 #define ANEG_CFG_FD             0x00002000
4192 #define ANEG_CFG_INVAL          0x00001f06
4193
4194 };
4195 #define ANEG_OK         0
4196 #define ANEG_DONE       1
4197 #define ANEG_TIMER_ENAB 2
4198 #define ANEG_FAILED     -1
4199
4200 #define ANEG_STATE_SETTLE_TIME  10000
4201
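/* The state machine below implements 802.3 clause-37 style
 * autonegotiation in software for fiber/TBI links.  All times,
 * including ANEG_STATE_SETTLE_TIME, are measured in invocations of
 * tg3_fiber_aneg_smachine() (ap->cur_time is bumped once per call),
 * not in absolute time units.
 */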
4202 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4203                                    struct tg3_fiber_aneginfo *ap)
4204 {
4205         u16 flowctrl;
4206         unsigned long delta;
4207         u32 rx_cfg_reg;
4208         int ret;
4209
4210         if (ap->state == ANEG_STATE_UNKNOWN) {
4211                 ap->rxconfig = 0;
4212                 ap->link_time = 0;
4213                 ap->cur_time = 0;
4214                 ap->ability_match_cfg = 0;
4215                 ap->ability_match_count = 0;
4216                 ap->ability_match = 0;
4217                 ap->idle_match = 0;
4218                 ap->ack_match = 0;
4219         }
4220         ap->cur_time++;
4221
4222         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4223                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4224
4225                 if (rx_cfg_reg != ap->ability_match_cfg) {
4226                         ap->ability_match_cfg = rx_cfg_reg;
4227                         ap->ability_match = 0;
4228                         ap->ability_match_count = 0;
4229                 } else {
4230                         if (++ap->ability_match_count > 1) {
4231                                 ap->ability_match = 1;
4232                                 ap->ability_match_cfg = rx_cfg_reg;
4233                         }
4234                 }
4235                 if (rx_cfg_reg & ANEG_CFG_ACK)
4236                         ap->ack_match = 1;
4237                 else
4238                         ap->ack_match = 0;
4239
4240                 ap->idle_match = 0;
4241         } else {
4242                 ap->idle_match = 1;
4243                 ap->ability_match_cfg = 0;
4244                 ap->ability_match_count = 0;
4245                 ap->ability_match = 0;
4246                 ap->ack_match = 0;
4247
4248                 rx_cfg_reg = 0;
4249         }
4250
4251         ap->rxconfig = rx_cfg_reg;
4252         ret = ANEG_OK;
4253
4254         switch (ap->state) {
4255         case ANEG_STATE_UNKNOWN:
4256                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4257                         ap->state = ANEG_STATE_AN_ENABLE;
4258
4259                 /* fallthru */
4260         case ANEG_STATE_AN_ENABLE:
4261                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4262                 if (ap->flags & MR_AN_ENABLE) {
4263                         ap->link_time = 0;
4264                         ap->cur_time = 0;
4265                         ap->ability_match_cfg = 0;
4266                         ap->ability_match_count = 0;
4267                         ap->ability_match = 0;
4268                         ap->idle_match = 0;
4269                         ap->ack_match = 0;
4270
4271                         ap->state = ANEG_STATE_RESTART_INIT;
4272                 } else {
4273                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4274                 }
4275                 break;
4276
4277         case ANEG_STATE_RESTART_INIT:
4278                 ap->link_time = ap->cur_time;
4279                 ap->flags &= ~(MR_NP_LOADED);
4280                 ap->txconfig = 0;
4281                 tw32(MAC_TX_AUTO_NEG, 0);
4282                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4283                 tw32_f(MAC_MODE, tp->mac_mode);
4284                 udelay(40);
4285
4286                 ret = ANEG_TIMER_ENAB;
4287                 ap->state = ANEG_STATE_RESTART;
4288
4289                 /* fallthru */
4290         case ANEG_STATE_RESTART:
4291                 delta = ap->cur_time - ap->link_time;
4292                 if (delta > ANEG_STATE_SETTLE_TIME)
4293                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4294                 else
4295                         ret = ANEG_TIMER_ENAB;
4296                 break;
4297
4298         case ANEG_STATE_DISABLE_LINK_OK:
4299                 ret = ANEG_DONE;
4300                 break;
4301
4302         case ANEG_STATE_ABILITY_DETECT_INIT:
4303                 ap->flags &= ~(MR_TOGGLE_TX);
4304                 ap->txconfig = ANEG_CFG_FD;
4305                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4306                 if (flowctrl & ADVERTISE_1000XPAUSE)
4307                         ap->txconfig |= ANEG_CFG_PS1;
4308                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4309                         ap->txconfig |= ANEG_CFG_PS2;
4310                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4311                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4312                 tw32_f(MAC_MODE, tp->mac_mode);
4313                 udelay(40);
4314
4315                 ap->state = ANEG_STATE_ABILITY_DETECT;
4316                 break;
4317
4318         case ANEG_STATE_ABILITY_DETECT:
4319                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4320                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4321                 break;
4322
4323         case ANEG_STATE_ACK_DETECT_INIT:
4324                 ap->txconfig |= ANEG_CFG_ACK;
4325                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4326                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4327                 tw32_f(MAC_MODE, tp->mac_mode);
4328                 udelay(40);
4329
4330                 ap->state = ANEG_STATE_ACK_DETECT;
4331
4332                 /* fallthru */
4333         case ANEG_STATE_ACK_DETECT:
4334                 if (ap->ack_match != 0) {
4335                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4336                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4337                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4338                         } else {
4339                                 ap->state = ANEG_STATE_AN_ENABLE;
4340                         }
4341                 } else if (ap->ability_match != 0 &&
4342                            ap->rxconfig == 0) {
4343                         ap->state = ANEG_STATE_AN_ENABLE;
4344                 }
4345                 break;
4346
4347         case ANEG_STATE_COMPLETE_ACK_INIT:
4348                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4349                         ret = ANEG_FAILED;
4350                         break;
4351                 }
4352                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4353                                MR_LP_ADV_HALF_DUPLEX |
4354                                MR_LP_ADV_SYM_PAUSE |
4355                                MR_LP_ADV_ASYM_PAUSE |
4356                                MR_LP_ADV_REMOTE_FAULT1 |
4357                                MR_LP_ADV_REMOTE_FAULT2 |
4358                                MR_LP_ADV_NEXT_PAGE |
4359                                MR_TOGGLE_RX |
4360                                MR_NP_RX);
4361                 if (ap->rxconfig & ANEG_CFG_FD)
4362                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4363                 if (ap->rxconfig & ANEG_CFG_HD)
4364                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4365                 if (ap->rxconfig & ANEG_CFG_PS1)
4366                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4367                 if (ap->rxconfig & ANEG_CFG_PS2)
4368                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4369                 if (ap->rxconfig & ANEG_CFG_RF1)
4370                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4371                 if (ap->rxconfig & ANEG_CFG_RF2)
4372                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4373                 if (ap->rxconfig & ANEG_CFG_NP)
4374                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4375
4376                 ap->link_time = ap->cur_time;
4377
4378                 ap->flags ^= (MR_TOGGLE_TX);
4379                 if (ap->rxconfig & 0x0008)
4380                         ap->flags |= MR_TOGGLE_RX;
4381                 if (ap->rxconfig & ANEG_CFG_NP)
4382                         ap->flags |= MR_NP_RX;
4383                 ap->flags |= MR_PAGE_RX;
4384
4385                 ap->state = ANEG_STATE_COMPLETE_ACK;
4386                 ret = ANEG_TIMER_ENAB;
4387                 break;
4388
4389         case ANEG_STATE_COMPLETE_ACK:
4390                 if (ap->ability_match != 0 &&
4391                     ap->rxconfig == 0) {
4392                         ap->state = ANEG_STATE_AN_ENABLE;
4393                         break;
4394                 }
4395                 delta = ap->cur_time - ap->link_time;
4396                 if (delta > ANEG_STATE_SETTLE_TIME) {
4397                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4398                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4399                         } else {
4400                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4401                                     !(ap->flags & MR_NP_RX)) {
4402                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4403                                 } else {
4404                                         ret = ANEG_FAILED;
4405                                 }
4406                         }
4407                 }
4408                 break;
4409
4410         case ANEG_STATE_IDLE_DETECT_INIT:
4411                 ap->link_time = ap->cur_time;
4412                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4413                 tw32_f(MAC_MODE, tp->mac_mode);
4414                 udelay(40);
4415
4416                 ap->state = ANEG_STATE_IDLE_DETECT;
4417                 ret = ANEG_TIMER_ENAB;
4418                 break;
4419
4420         case ANEG_STATE_IDLE_DETECT:
4421                 if (ap->ability_match != 0 &&
4422                     ap->rxconfig == 0) {
4423                         ap->state = ANEG_STATE_AN_ENABLE;
4424                         break;
4425                 }
4426                 delta = ap->cur_time - ap->link_time;
4427                 if (delta > ANEG_STATE_SETTLE_TIME) {
4428                         /* XXX another gem from the Broadcom driver :( */
4429                         ap->state = ANEG_STATE_LINK_OK;
4430                 }
4431                 break;
4432
4433         case ANEG_STATE_LINK_OK:
4434                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4435                 ret = ANEG_DONE;
4436                 break;
4437
4438         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4439                 /* ??? unimplemented */
4440                 break;
4441
4442         case ANEG_STATE_NEXT_PAGE_WAIT:
4443                 /* ??? unimplemented */
4444                 break;
4445
4446         default:
4447                 ret = ANEG_FAILED;
4448                 break;
4449         }
4450
4451         return ret;
4452 }
4453
4454 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4455 {
4456         int res = 0;
4457         struct tg3_fiber_aneginfo aninfo;
4458         int status = ANEG_FAILED;
4459         unsigned int tick;
4460         u32 tmp;
4461
4462         tw32_f(MAC_TX_AUTO_NEG, 0);
4463
4464         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4465         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4466         udelay(40);
4467
4468         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4469         udelay(40);
4470
4471         memset(&aninfo, 0, sizeof(aninfo));
4472         aninfo.flags |= MR_AN_ENABLE;
4473         aninfo.state = ANEG_STATE_UNKNOWN;
4474         aninfo.cur_time = 0;
4475         tick = 0;
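	/* Run the state machine for at most 195000 ticks of udelay(1),
	 * i.e. roughly 195 ms, unless it completes or fails first.
	 */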
4476         while (++tick < 195000) {
4477                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4478                 if (status == ANEG_DONE || status == ANEG_FAILED)
4479                         break;
4480
4481                 udelay(1);
4482         }
4483
4484         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4485         tw32_f(MAC_MODE, tp->mac_mode);
4486         udelay(40);
4487
4488         *txflags = aninfo.txconfig;
4489         *rxflags = aninfo.flags;
4490
4491         if (status == ANEG_DONE &&
4492             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4493                              MR_LP_ADV_FULL_DUPLEX)))
4494                 res = 1;
4495
4496         return res;
4497 }
4498
4499 static void tg3_init_bcm8002(struct tg3 *tp)
4500 {
4501         u32 mac_status = tr32(MAC_STATUS);
4502         int i;
4503
	/* Reset when initializing for the first time, or when we have a link. */
4505         if (tg3_flag(tp, INIT_COMPLETE) &&
4506             !(mac_status & MAC_STATUS_PCS_SYNCED))
4507                 return;
4508
4509         /* Set PLL lock range. */
4510         tg3_writephy(tp, 0x16, 0x8007);
4511
4512         /* SW reset */
4513         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4514
	/* Wait for reset to complete (~5 ms of busy-waiting). */
	/* XXX schedule_timeout() ... */
4517         for (i = 0; i < 500; i++)
4518                 udelay(10);
4519
4520         /* Config mode; select PMA/Ch 1 regs. */
4521         tg3_writephy(tp, 0x10, 0x8411);
4522
4523         /* Enable auto-lock and comdet, select txclk for tx. */
4524         tg3_writephy(tp, 0x11, 0x0a10);
4525
4526         tg3_writephy(tp, 0x18, 0x00a0);
4527         tg3_writephy(tp, 0x16, 0x41ff);
4528
4529         /* Assert and deassert POR. */
4530         tg3_writephy(tp, 0x13, 0x0400);
4531         udelay(40);
4532         tg3_writephy(tp, 0x13, 0x0000);
4533
4534         tg3_writephy(tp, 0x11, 0x0a50);
4535         udelay(40);
4536         tg3_writephy(tp, 0x11, 0x0a10);
4537
	/* Wait for the signal to stabilize (~150 ms of busy-waiting). */
	/* XXX schedule_timeout() ... */
4540         for (i = 0; i < 15000; i++)
4541                 udelay(10);
4542
4543         /* Deselect the channel register so we can read the PHYID
4544          * later.
4545          */
4546         tg3_writephy(tp, 0x10, 0x8011);
4547 }
4548
4549 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4550 {
4551         u16 flowctrl;
4552         u32 sg_dig_ctrl, sg_dig_status;
4553         u32 serdes_cfg, expected_sg_dig_ctrl;
4554         int workaround, port_a;
4555         int current_link_up;
4556
4557         serdes_cfg = 0;
4558         expected_sg_dig_ctrl = 0;
4559         workaround = 0;
4560         port_a = 1;
4561         current_link_up = 0;
4562
4563         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4564             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4565                 workaround = 1;
4566                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4567                         port_a = 0;
4568
4569                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4570                 /* preserve bits 20-23 for voltage regulator */
4571                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4572         }
4573
4574         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4575
4576         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4577                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4578                         if (workaround) {
4579                                 u32 val = serdes_cfg;
4580
4581                                 if (port_a)
4582                                         val |= 0xc010000;
4583                                 else
4584                                         val |= 0x4010000;
4585                                 tw32_f(MAC_SERDES_CFG, val);
4586                         }
4587
4588                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4589                 }
4590                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4591                         tg3_setup_flow_control(tp, 0, 0);
4592                         current_link_up = 1;
4593                 }
4594                 goto out;
4595         }
4596
4597         /* Want auto-negotiation.  */
4598         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4599
4600         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4601         if (flowctrl & ADVERTISE_1000XPAUSE)
4602                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4603         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4604                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4605
4606         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4607                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4608                     tp->serdes_counter &&
4609                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4610                                     MAC_STATUS_RCVD_CFG)) ==
4611                      MAC_STATUS_PCS_SYNCED)) {
4612                         tp->serdes_counter--;
4613                         current_link_up = 1;
4614                         goto out;
4615                 }
4616 restart_autoneg:
4617                 if (workaround)
4618                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4619                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4620                 udelay(5);
4621                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4622
4623                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4624                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4625         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4626                                  MAC_STATUS_SIGNAL_DET)) {
4627                 sg_dig_status = tr32(SG_DIG_STATUS);
4628                 mac_status = tr32(MAC_STATUS);
4629
4630                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4631                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4632                         u32 local_adv = 0, remote_adv = 0;
4633
4634                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4635                                 local_adv |= ADVERTISE_1000XPAUSE;
4636                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4637                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4638
4639                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4640                                 remote_adv |= LPA_1000XPAUSE;
4641                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4642                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4643
4644                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4645                         current_link_up = 1;
4646                         tp->serdes_counter = 0;
4647                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4648                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4649                         if (tp->serdes_counter)
4650                                 tp->serdes_counter--;
4651                         else {
4652                                 if (workaround) {
4653                                         u32 val = serdes_cfg;
4654
4655                                         if (port_a)
4656                                                 val |= 0xc010000;
4657                                         else
4658                                                 val |= 0x4010000;
4659
4660                                         tw32_f(MAC_SERDES_CFG, val);
4661                                 }
4662
4663                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4664                                 udelay(40);
4665
				/* Link parallel detection - the link is up
				 * only if we have PCS_SYNC and are not
				 * receiving config code words.
				 */
4669                                 mac_status = tr32(MAC_STATUS);
4670                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4671                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4672                                         tg3_setup_flow_control(tp, 0, 0);
4673                                         current_link_up = 1;
4674                                         tp->phy_flags |=
4675                                                 TG3_PHYFLG_PARALLEL_DETECT;
4676                                         tp->serdes_counter =
4677                                                 SERDES_PARALLEL_DET_TIMEOUT;
4678                                 } else
4679                                         goto restart_autoneg;
4680                         }
4681                 }
4682         } else {
4683                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4684                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4685         }
4686
4687 out:
4688         return current_link_up;
4689 }
4690
4691 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4692 {
4693         int current_link_up = 0;
4694
4695         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4696                 goto out;
4697
4698         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4699                 u32 txflags, rxflags;
4700                 int i;
4701
4702                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4703                         u32 local_adv = 0, remote_adv = 0;
4704
4705                         if (txflags & ANEG_CFG_PS1)
4706                                 local_adv |= ADVERTISE_1000XPAUSE;
4707                         if (txflags & ANEG_CFG_PS2)
4708                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4709
4710                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4711                                 remote_adv |= LPA_1000XPAUSE;
4712                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4713                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4714
4715                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4716
4717                         current_link_up = 1;
4718                 }
4719                 for (i = 0; i < 30; i++) {
4720                         udelay(20);
4721                         tw32_f(MAC_STATUS,
4722                                (MAC_STATUS_SYNC_CHANGED |
4723                                 MAC_STATUS_CFG_CHANGED));
4724                         udelay(40);
4725                         if ((tr32(MAC_STATUS) &
4726                              (MAC_STATUS_SYNC_CHANGED |
4727                               MAC_STATUS_CFG_CHANGED)) == 0)
4728                                 break;
4729                 }
4730
4731                 mac_status = tr32(MAC_STATUS);
4732                 if (current_link_up == 0 &&
4733                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4734                     !(mac_status & MAC_STATUS_RCVD_CFG))
4735                         current_link_up = 1;
4736         } else {
4737                 tg3_setup_flow_control(tp, 0, 0);
4738
4739                 /* Forcing 1000FD link up. */
4740                 current_link_up = 1;
4741
4742                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4743                 udelay(40);
4744
4745                 tw32_f(MAC_MODE, tp->mac_mode);
4746                 udelay(40);
4747         }
4748
4749 out:
4750         return current_link_up;
4751 }
4752
4753 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4754 {
4755         u32 orig_pause_cfg;
4756         u16 orig_active_speed;
4757         u8 orig_active_duplex;
4758         u32 mac_status;
4759         int current_link_up;
4760         int i;
4761
4762         orig_pause_cfg = tp->link_config.active_flowctrl;
4763         orig_active_speed = tp->link_config.active_speed;
4764         orig_active_duplex = tp->link_config.active_duplex;
4765
4766         if (!tg3_flag(tp, HW_AUTONEG) &&
4767             netif_carrier_ok(tp->dev) &&
4768             tg3_flag(tp, INIT_COMPLETE)) {
4769                 mac_status = tr32(MAC_STATUS);
4770                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4771                                MAC_STATUS_SIGNAL_DET |
4772                                MAC_STATUS_CFG_CHANGED |
4773                                MAC_STATUS_RCVD_CFG);
4774                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4775                                    MAC_STATUS_SIGNAL_DET)) {
4776                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4777                                             MAC_STATUS_CFG_CHANGED));
4778                         return 0;
4779                 }
4780         }
4781
4782         tw32_f(MAC_TX_AUTO_NEG, 0);
4783
4784         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4785         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4786         tw32_f(MAC_MODE, tp->mac_mode);
4787         udelay(40);
4788
4789         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4790                 tg3_init_bcm8002(tp);
4791
	/* Enable link change events even while polling the serdes. */
4793         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4794         udelay(40);
4795
4796         current_link_up = 0;
4797         mac_status = tr32(MAC_STATUS);
4798
4799         if (tg3_flag(tp, HW_AUTONEG))
4800                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4801         else
4802                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4803
4804         tp->napi[0].hw_status->status =
4805                 (SD_STATUS_UPDATED |
4806                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4807
4808         for (i = 0; i < 100; i++) {
4809                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4810                                     MAC_STATUS_CFG_CHANGED));
4811                 udelay(5);
4812                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4813                                          MAC_STATUS_CFG_CHANGED |
4814                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4815                         break;
4816         }
4817
4818         mac_status = tr32(MAC_STATUS);
4819         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4820                 current_link_up = 0;
4821                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4822                     tp->serdes_counter == 0) {
4823                         tw32_f(MAC_MODE, (tp->mac_mode |
4824                                           MAC_MODE_SEND_CONFIGS));
4825                         udelay(1);
4826                         tw32_f(MAC_MODE, tp->mac_mode);
4827                 }
4828         }
4829
4830         if (current_link_up == 1) {
4831                 tp->link_config.active_speed = SPEED_1000;
4832                 tp->link_config.active_duplex = DUPLEX_FULL;
4833                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4834                                     LED_CTRL_LNKLED_OVERRIDE |
4835                                     LED_CTRL_1000MBPS_ON));
4836         } else {
4837                 tp->link_config.active_speed = SPEED_INVALID;
4838                 tp->link_config.active_duplex = DUPLEX_INVALID;
4839                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4840                                     LED_CTRL_LNKLED_OVERRIDE |
4841                                     LED_CTRL_TRAFFIC_OVERRIDE));
4842         }
4843
4844         if (current_link_up != netif_carrier_ok(tp->dev)) {
4845                 if (current_link_up)
4846                         netif_carrier_on(tp->dev);
4847                 else
4848                         netif_carrier_off(tp->dev);
4849                 tg3_link_report(tp);
4850         } else {
4851                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4852                 if (orig_pause_cfg != now_pause_cfg ||
4853                     orig_active_speed != tp->link_config.active_speed ||
4854                     orig_active_duplex != tp->link_config.active_duplex)
4855                         tg3_link_report(tp);
4856         }
4857
4858         return 0;
4859 }
4860
4861 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4862 {
4863         int current_link_up, err = 0;
4864         u32 bmsr, bmcr;
4865         u16 current_speed;
4866         u8 current_duplex;
4867         u32 local_adv, remote_adv;
4868
4869         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4870         tw32_f(MAC_MODE, tp->mac_mode);
4871         udelay(40);
4872
4873         tw32(MAC_EVENT, 0);
4874
4875         tw32_f(MAC_STATUS,
4876              (MAC_STATUS_SYNC_CHANGED |
4877               MAC_STATUS_CFG_CHANGED |
4878               MAC_STATUS_MI_COMPLETION |
4879               MAC_STATUS_LNKSTATE_CHANGED));
4880         udelay(40);
4881
4882         if (force_reset)
4883                 tg3_phy_reset(tp);
4884
4885         current_link_up = 0;
4886         current_speed = SPEED_INVALID;
4887         current_duplex = DUPLEX_INVALID;
4888
4889         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4890         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4891         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4892                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4893                         bmsr |= BMSR_LSTATUS;
4894                 else
4895                         bmsr &= ~BMSR_LSTATUS;
4896         }
4897
4898         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4899
4900         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4901             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4902                 /* do nothing, just check for link up at the end */
4903         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4904                 u32 adv, new_adv;
4905
4906                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4907                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4908                                   ADVERTISE_1000XPAUSE |
4909                                   ADVERTISE_1000XPSE_ASYM |
4910                                   ADVERTISE_SLCT);
4911
4912                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4913
4914                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4915                         new_adv |= ADVERTISE_1000XHALF;
4916                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4917                         new_adv |= ADVERTISE_1000XFULL;
4918
4919                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4920                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4921                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4922                         tg3_writephy(tp, MII_BMCR, bmcr);
4923
4924                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4925                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4926                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4927
4928                         return err;
4929                 }
4930         } else {
4931                 u32 new_bmcr;
4932
4933                 bmcr &= ~BMCR_SPEED1000;
4934                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4935
4936                 if (tp->link_config.duplex == DUPLEX_FULL)
4937                         new_bmcr |= BMCR_FULLDPLX;
4938
4939                 if (new_bmcr != bmcr) {
4940                         /* BMCR_SPEED1000 is a reserved bit that needs
4941                          * to be set on write.
4942                          */
4943                         new_bmcr |= BMCR_SPEED1000;
4944
4945                         /* Force a linkdown */
4946                         if (netif_carrier_ok(tp->dev)) {
4947                                 u32 adv;
4948
4949                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4950                                 adv &= ~(ADVERTISE_1000XFULL |
4951                                          ADVERTISE_1000XHALF |
4952                                          ADVERTISE_SLCT);
4953                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4954                                 tg3_writephy(tp, MII_BMCR, bmcr |
4955                                                            BMCR_ANRESTART |
4956                                                            BMCR_ANENABLE);
4957                                 udelay(10);
4958                                 netif_carrier_off(tp->dev);
4959                         }
4960                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4961                         bmcr = new_bmcr;
4962                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4963                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4964                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4965                             ASIC_REV_5714) {
4966                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4967                                         bmsr |= BMSR_LSTATUS;
4968                                 else
4969                                         bmsr &= ~BMSR_LSTATUS;
4970                         }
4971                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4972                 }
4973         }
4974
4975         if (bmsr & BMSR_LSTATUS) {
4976                 current_speed = SPEED_1000;
4977                 current_link_up = 1;
4978                 if (bmcr & BMCR_FULLDPLX)
4979                         current_duplex = DUPLEX_FULL;
4980                 else
4981                         current_duplex = DUPLEX_HALF;
4982
4983                 local_adv = 0;
4984                 remote_adv = 0;
4985
4986                 if (bmcr & BMCR_ANENABLE) {
4987                         u32 common;
4988
4989                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4990                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4991                         common = local_adv & remote_adv;
4992                         if (common & (ADVERTISE_1000XHALF |
4993                                       ADVERTISE_1000XFULL)) {
4994                                 if (common & ADVERTISE_1000XFULL)
4995                                         current_duplex = DUPLEX_FULL;
4996                                 else
4997                                         current_duplex = DUPLEX_HALF;
4998                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4999                                 /* Link is up via parallel detect */
5000                         } else {
5001                                 current_link_up = 0;
5002                         }
5003                 }
5004         }
5005
5006         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5007                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5008
5009         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5010         if (tp->link_config.active_duplex == DUPLEX_HALF)
5011                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5012
5013         tw32_f(MAC_MODE, tp->mac_mode);
5014         udelay(40);
5015
5016         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5017
5018         tp->link_config.active_speed = current_speed;
5019         tp->link_config.active_duplex = current_duplex;
5020
5021         if (current_link_up != netif_carrier_ok(tp->dev)) {
5022                 if (current_link_up)
5023                         netif_carrier_on(tp->dev);
5024                 else {
5025                         netif_carrier_off(tp->dev);
5026                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5027                 }
5028                 tg3_link_report(tp);
5029         }
5030         return err;
5031 }
5032
5033 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5034 {
5035         if (tp->serdes_counter) {
5036                 /* Give autoneg time to complete. */
5037                 tp->serdes_counter--;
5038                 return;
5039         }
5040
5041         if (!netif_carrier_ok(tp->dev) &&
5042             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5043                 u32 bmcr;
5044
5045                 tg3_readphy(tp, MII_BMCR, &bmcr);
5046                 if (bmcr & BMCR_ANENABLE) {
5047                         u32 phy1, phy2;
5048
5049                         /* Select shadow register 0x1f */
5050                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5051                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5052
5053                         /* Select expansion interrupt status register */
5054                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5055                                          MII_TG3_DSP_EXP1_INT_STAT);
5056                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5057                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5058
5059                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and are not receiving
				 * config code words, so the link is up via
				 * parallel detection.
5063                                  */
5064
5065                                 bmcr &= ~BMCR_ANENABLE;
5066                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5067                                 tg3_writephy(tp, MII_BMCR, bmcr);
5068                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5069                         }
5070                 }
5071         } else if (netif_carrier_ok(tp->dev) &&
5072                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5073                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5074                 u32 phy2;
5075
5076                 /* Select expansion interrupt status register */
5077                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5078                                  MII_TG3_DSP_EXP1_INT_STAT);
5079                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5080                 if (phy2 & 0x20) {
5081                         u32 bmcr;
5082
5083                         /* Config code words received, turn on autoneg. */
5084                         tg3_readphy(tp, MII_BMCR, &bmcr);
5085                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5086
5087                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5089                 }
5090         }
5091 }
5092
5093 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5094 {
5095         u32 val;
5096         int err;
5097
5098         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5099                 err = tg3_setup_fiber_phy(tp, force_reset);
5100         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5101                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5102         else
5103                 err = tg3_setup_copper_phy(tp, force_reset);
5104
5105         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5106                 u32 scale;
5107
5108                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5109                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5110                         scale = 65;
5111                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5112                         scale = 6;
5113                 else
5114                         scale = 12;
5115
5116                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5117                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5118                 tw32(GRC_MISC_CFG, val);
5119         }
5120
5121         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5122               (6 << TX_LENGTHS_IPG_SHIFT);
5123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5124                 val |= tr32(MAC_TX_LENGTHS) &
5125                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5126                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5127
5128         if (tp->link_config.active_speed == SPEED_1000 &&
5129             tp->link_config.active_duplex == DUPLEX_HALF)
5130                 tw32(MAC_TX_LENGTHS, val |
5131                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5132         else
5133                 tw32(MAC_TX_LENGTHS, val |
5134                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5135
5136         if (!tg3_flag(tp, 5705_PLUS)) {
5137                 if (netif_carrier_ok(tp->dev)) {
5138                         tw32(HOSTCC_STAT_COAL_TICKS,
5139                              tp->coal.stats_block_coalesce_usecs);
5140                 } else {
5141                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5142                 }
5143         }
5144
5145         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5146                 val = tr32(PCIE_PWR_MGMT_THRESH);
5147                 if (!netif_carrier_ok(tp->dev))
5148                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5149                               tp->pwrmgmt_thresh;
5150                 else
5151                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5152                 tw32(PCIE_PWR_MGMT_THRESH, val);
5153         }
5154
5155         return err;
5156 }
5157
5158 static inline int tg3_irq_sync(struct tg3 *tp)
5159 {
5160         return tp->irq_sync;
5161 }
5162
5163 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5164 {
5165         int i;
5166
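	/* Index the destination buffer by register offset so that
	 * tg3_dump_state() can print each value at its register address.
	 */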
5167         dst = (u32 *)((u8 *)dst + off);
5168         for (i = 0; i < len; i += sizeof(u32))
5169                 *dst++ = tr32(off + i);
5170 }
5171
5172 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5173 {
5174         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5175         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5176         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5177         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5178         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5179         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5180         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5181         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5182         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5183         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5184         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5185         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5186         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5187         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5188         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5189         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5190         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5191         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5192         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5193
5194         if (tg3_flag(tp, SUPPORT_MSIX))
5195                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5196
5197         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5198         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5199         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5200         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5201         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5202         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5203         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5204         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5205
5206         if (!tg3_flag(tp, 5705_PLUS)) {
5207                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5208                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5209                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5210         }
5211
5212         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5213         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5214         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5215         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5216         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5217
5218         if (tg3_flag(tp, NVRAM))
5219                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5220 }
5221
5222 static void tg3_dump_state(struct tg3 *tp)
5223 {
5224         int i;
5225         u32 *regs;
5226
5227         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5228         if (!regs) {
5229                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5230                 return;
5231         }
5232
5233         if (tg3_flag(tp, PCI_EXPRESS)) {
5234                 /* Read up to but not including private PCI registers */
5235                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5236                         regs[i / sizeof(u32)] = tr32(i);
5237         } else
5238                 tg3_dump_legacy_regs(tp, regs);
5239
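	/* Dump the register block 16 bytes per line, skipping groups of
	 * four words that are all zero.
	 */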
5240         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5241                 if (!regs[i + 0] && !regs[i + 1] &&
5242                     !regs[i + 2] && !regs[i + 3])
5243                         continue;
5244
5245                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5246                            i * 4,
5247                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5248         }
5249
5250         kfree(regs);
5251
5252         for (i = 0; i < tp->irq_cnt; i++) {
5253                 struct tg3_napi *tnapi = &tp->napi[i];
5254
5255                 /* SW status block */
5256                 netdev_err(tp->dev,
5257                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5258                            i,
5259                            tnapi->hw_status->status,
5260                            tnapi->hw_status->status_tag,
5261                            tnapi->hw_status->rx_jumbo_consumer,
5262                            tnapi->hw_status->rx_consumer,
5263                            tnapi->hw_status->rx_mini_consumer,
5264                            tnapi->hw_status->idx[0].rx_producer,
5265                            tnapi->hw_status->idx[0].tx_consumer);
5266
5267                 netdev_err(tp->dev,
5268                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5269                            i,
5270                            tnapi->last_tag, tnapi->last_irq_tag,
5271                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5272                            tnapi->rx_rcb_ptr,
5273                            tnapi->prodring.rx_std_prod_idx,
5274                            tnapi->prodring.rx_std_cons_idx,
5275                            tnapi->prodring.rx_jmb_prod_idx,
5276                            tnapi->prodring.rx_jmb_cons_idx);
5277         }
5278 }
5279
5280 /* This is called whenever we suspect that the system chipset is re-
5281  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5282  * is bogus tx completions. We try to recover by setting the
5283  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5284  * in the workqueue.
5285  */
5286 static void tg3_tx_recover(struct tg3 *tp)
5287 {
5288         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5289                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5290
5291         netdev_warn(tp->dev,
5292                     "The system may be re-ordering memory-mapped I/O "
5293                     "cycles to the network device, attempting to recover. "
5294                     "Please report the problem to the driver maintainer "
5295                     "and include system chipset information.\n");
5296
5297         spin_lock(&tp->lock);
5298         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5299         spin_unlock(&tp->lock);
5300 }
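
/* A minimal sketch (hypothetical helper; the driver folds this into its
 * mailbox accessors) of the recovery that MBOX_WRITE_REORDER selects:
 * follow each mailbox write with a read-back of the same register, which
 * forces the posted write to reach the device before any later MMIO.
 */
static inline void tg3_mbox_write_flush(void __iomem *mbox, u32 val)
{
	writel(val, mbox);
	readl(mbox);		/* read back to flush the posted write */
}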
5301
5302 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5303 {
5304         /* Tell compiler to fetch tx indices from memory. */
5305         barrier();
5306         return tnapi->tx_pending -
5307                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5308 }
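
/* Worked example of the wrap-safe ring math above: with TG3_TX_RING_SIZE
 * of 512, tx_prod = 5 and tx_cons = 510 (the producer has wrapped), the
 * in-flight count is (5 - 510) & 511 = 7, so the arithmetic stays correct
 * across wraparound as long as the ring size is a power of two.
 */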
5309
5310 /* Tigon3 never reports partial packet sends.  So we do not
5311  * need special logic to handle SKBs that have not had all
5312  * of their frags sent yet, like SunGEM does.
5313  */
5314 static void tg3_tx(struct tg3_napi *tnapi)
5315 {
5316         struct tg3 *tp = tnapi->tp;
5317         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5318         u32 sw_idx = tnapi->tx_cons;
5319         struct netdev_queue *txq;
5320         int index = tnapi - tp->napi;
5321
5322         if (tg3_flag(tp, ENABLE_TSS))
5323                 index--;
5324
5325         txq = netdev_get_tx_queue(tp->dev, index);
5326
5327         while (sw_idx != hw_idx) {
5328                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5329                 struct sk_buff *skb = ri->skb;
5330                 int i, tx_bug = 0;
5331
5332                 if (unlikely(skb == NULL)) {
5333                         tg3_tx_recover(tp);
5334                         return;
5335                 }
5336
5337                 pci_unmap_single(tp->pdev,
5338                                  dma_unmap_addr(ri, mapping),
5339                                  skb_headlen(skb),
5340                                  PCI_DMA_TODEVICE);
5341
5342                 ri->skb = NULL;
5343
5344                 while (ri->fragmented) {
5345                         ri->fragmented = false;
5346                         sw_idx = NEXT_TX(sw_idx);
5347                         ri = &tnapi->tx_buffers[sw_idx];
5348                 }
5349
5350                 sw_idx = NEXT_TX(sw_idx);
5351
5352                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5353                         ri = &tnapi->tx_buffers[sw_idx];
5354                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5355                                 tx_bug = 1;
5356
5357                         pci_unmap_page(tp->pdev,
5358                                        dma_unmap_addr(ri, mapping),
5359                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5360                                        PCI_DMA_TODEVICE);
5361
5362                         while (ri->fragmented) {
5363                                 ri->fragmented = false;
5364                                 sw_idx = NEXT_TX(sw_idx);
5365                                 ri = &tnapi->tx_buffers[sw_idx];
5366                         }
5367
5368                         sw_idx = NEXT_TX(sw_idx);
5369                 }
5370
5371                 dev_kfree_skb(skb);
5372
5373                 if (unlikely(tx_bug)) {
5374                         tg3_tx_recover(tp);
5375                         return;
5376                 }
5377         }
5378
5379         tnapi->tx_cons = sw_idx;
5380
5381         /* Need to make the tx_cons update visible to tg3_start_xmit()
5382          * before checking for netif_queue_stopped().  Without the
5383          * memory barrier, there is a small possibility that tg3_start_xmit()
5384          * will miss it and cause the queue to be stopped forever.
5385          */
5386         smp_mb();
5387
5388         if (unlikely(netif_tx_queue_stopped(txq) &&
5389                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5390                 __netif_tx_lock(txq, smp_processor_id());
5391                 if (netif_tx_queue_stopped(txq) &&
5392                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5393                         netif_tx_wake_queue(txq);
5394                 __netif_tx_unlock(txq);
5395         }
5396 }
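
/* The smp_mb() above pairs with the re-check done on the producer side in
 * tg3_start_xmit().  The canonical lockless stop/wake protocol it belongs
 * to looks roughly like this on the producer side (a sketch, not the
 * driver's verbatim code; stop_thresh is illustrative):
 *
 *	if (tg3_tx_avail(tnapi) <= stop_thresh) {
 *		netif_tx_stop_queue(txq);
 *		smp_mb();
 *		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *			netif_tx_wake_queue(txq);
 *	}
 */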
5397
5398 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5399 {
5400         if (!ri->skb)
5401                 return;
5402
5403         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5404                          map_sz, PCI_DMA_FROMDEVICE);
5405         dev_kfree_skb_any(ri->skb);
5406         ri->skb = NULL;
5407 }
5408
5409 /* Returns size of skb allocated or < 0 on error.
5410  *
5411  * We only need to fill in the address because the other members
5412  * of the RX descriptor are invariant, see tg3_init_rings.
5413  *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5415  * posting buffers we only dirty the first cache line of the RX
5416  * descriptor (containing the address).  Whereas for the RX status
5417  * buffers the cpu only reads the last cacheline of the RX descriptor
5418  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5419  */
5420 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5421                             u32 opaque_key, u32 dest_idx_unmasked)
5422 {
5423         struct tg3_rx_buffer_desc *desc;
5424         struct ring_info *map;
5425         struct sk_buff *skb;
5426         dma_addr_t mapping;
5427         int skb_size, dest_idx;
5428
5429         switch (opaque_key) {
5430         case RXD_OPAQUE_RING_STD:
5431                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5432                 desc = &tpr->rx_std[dest_idx];
5433                 map = &tpr->rx_std_buffers[dest_idx];
5434                 skb_size = tp->rx_pkt_map_sz;
5435                 break;
5436
5437         case RXD_OPAQUE_RING_JUMBO:
5438                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5439                 desc = &tpr->rx_jmb[dest_idx].std;
5440                 map = &tpr->rx_jmb_buffers[dest_idx];
5441                 skb_size = TG3_RX_JMB_MAP_SZ;
5442                 break;
5443
5444         default:
5445                 return -EINVAL;
5446         }
5447
5448         /* Do not overwrite any of the map or rp information
5449          * until we are sure we can commit to a new buffer.
5450          *
5451          * Callers depend upon this behavior and assume that
5452          * we leave everything unchanged if we fail.
5453          */
5454         skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
5455         if (skb == NULL)
5456                 return -ENOMEM;
5457
5458         skb_reserve(skb, TG3_RX_OFFSET(tp));
5459
5460         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5461                                  PCI_DMA_FROMDEVICE);
5462         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5463                 dev_kfree_skb(skb);
5464                 return -EIO;
5465         }
5466
5467         map->skb = skb;
5468         dma_unmap_addr_set(map, mapping, mapping);
5469
5470         desc->addr_hi = ((u64)mapping >> 32);
5471         desc->addr_lo = ((u64)mapping & 0xffffffff);
5472
5473         return skb_size;
5474 }
5475
5476 /* We only need to move over in the address because the other
5477  * members of the RX descriptor are invariant.  See notes above
5478  * tg3_alloc_rx_skb for full details.
5479  */
5480 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5481                            struct tg3_rx_prodring_set *dpr,
5482                            u32 opaque_key, int src_idx,
5483                            u32 dest_idx_unmasked)
5484 {
5485         struct tg3 *tp = tnapi->tp;
5486         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5487         struct ring_info *src_map, *dest_map;
5488         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5489         int dest_idx;
5490
5491         switch (opaque_key) {
5492         case RXD_OPAQUE_RING_STD:
5493                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5494                 dest_desc = &dpr->rx_std[dest_idx];
5495                 dest_map = &dpr->rx_std_buffers[dest_idx];
5496                 src_desc = &spr->rx_std[src_idx];
5497                 src_map = &spr->rx_std_buffers[src_idx];
5498                 break;
5499
5500         case RXD_OPAQUE_RING_JUMBO:
5501                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5502                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5503                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5504                 src_desc = &spr->rx_jmb[src_idx].std;
5505                 src_map = &spr->rx_jmb_buffers[src_idx];
5506                 break;
5507
5508         default:
5509                 return;
5510         }
5511
5512         dest_map->skb = src_map->skb;
5513         dma_unmap_addr_set(dest_map, mapping,
5514                            dma_unmap_addr(src_map, mapping));
5515         dest_desc->addr_hi = src_desc->addr_hi;
5516         dest_desc->addr_lo = src_desc->addr_lo;
5517
5518         /* Ensure that the update to the skb happens after the physical
5519          * addresses have been transferred to the new BD location.
5520          */
5521         smp_wmb();
5522
5523         src_map->skb = NULL;
5524 }
5525
5526 /* The RX ring scheme is composed of multiple rings which post fresh
5527  * buffers to the chip, and one special ring the chip uses to report
5528  * status back to the host.
5529  *
5530  * The special ring reports the status of received packets to the
5531  * host.  The chip does not write into the original descriptor the
5532  * RX buffer was obtained from.  The chip simply takes the original
5533  * descriptor as provided by the host, updates the status and length
5534  * field, then writes this into the next status ring entry.
5535  *
5536  * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip RAM.  When the packet's length
 * is known, the chip walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
 * whose MAXLEN covers the new packet's length is chosen.
5542  *
5543  * The "separate ring for rx status" scheme may sound queer, but it makes
5544  * sense from a cache coherency perspective.  If only the host writes
5545  * to the buffer post rings, and only the chip writes to the rx status
5546  * rings, then cache lines never move beyond shared-modified state.
5547  * If both the host and chip were to write into the same ring, cache line
5548  * eviction could occur since both entities want it in an exclusive state.
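 *
 * In short (a schematic of the flow, not driver code):
 *
 *	host:  prod_ring[i].addr = buf_dma;  mailbox = ++prod_idx
 *	chip:  DMAs the packet into prod_ring[i]'s buffer, then writes
 *	       { opaque = i, len, flags } into the next status ring entry
 *	host:  tg3_rx() reads the status entry, locates the buffer via the
 *	       opaque cookie, and posts a fresh buffer back at index i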
5549  */
5550 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5551 {
5552         struct tg3 *tp = tnapi->tp;
5553         u32 work_mask, rx_std_posted = 0;
5554         u32 std_prod_idx, jmb_prod_idx;
5555         u32 sw_idx = tnapi->rx_rcb_ptr;
5556         u16 hw_idx;
5557         int received;
5558         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5559
5560         hw_idx = *(tnapi->rx_rcb_prod_idx);
5561         /*
5562          * We need to order the read of hw_idx and the read of
5563          * the opaque cookie.
5564          */
5565         rmb();
5566         work_mask = 0;
5567         received = 0;
5568         std_prod_idx = tpr->rx_std_prod_idx;
5569         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5570         while (sw_idx != hw_idx && budget > 0) {
5571                 struct ring_info *ri;
5572                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5573                 unsigned int len;
5574                 struct sk_buff *skb;
5575                 dma_addr_t dma_addr;
5576                 u32 opaque_key, desc_idx, *post_ptr;
5577
5578                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5579                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5580                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5581                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5582                         dma_addr = dma_unmap_addr(ri, mapping);
5583                         skb = ri->skb;
5584                         post_ptr = &std_prod_idx;
5585                         rx_std_posted++;
5586                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5587                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5588                         dma_addr = dma_unmap_addr(ri, mapping);
5589                         skb = ri->skb;
5590                         post_ptr = &jmb_prod_idx;
5591                 } else
5592                         goto next_pkt_nopost;
5593
5594                 work_mask |= opaque_key;
5595
5596                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5597                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5598                 drop_it:
5599                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5600                                        desc_idx, *post_ptr);
5601                 drop_it_no_recycle:
5602                         /* Other statistics are tracked by the card. */
5603                         tp->rx_dropped++;
5604                         goto next_pkt;
5605                 }
5606
5607                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5608                       ETH_FCS_LEN;
5609
5610                 if (len > TG3_RX_COPY_THRESH(tp)) {
5611                         int skb_size;
5612
5613                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5614                                                     *post_ptr);
5615                         if (skb_size < 0)
5616                                 goto drop_it;
5617
5618                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5619                                          PCI_DMA_FROMDEVICE);
5620
5621                         /* Ensure that the update to the skb happens
5622                          * after the usage of the old DMA mapping.
5623                          */
5624                         smp_wmb();
5625
5626                         ri->skb = NULL;
5627
5628                         skb_put(skb, len);
5629                 } else {
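                        /* Small packet: copy it into a freshly allocated
                         * skb so the large ring buffer can be recycled
                         * straight back to the chip.
                         */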
5630                         struct sk_buff *copy_skb;
5631
5632                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5633                                        desc_idx, *post_ptr);
5634
5635                         copy_skb = netdev_alloc_skb(tp->dev, len +
5636                                                     TG3_RAW_IP_ALIGN);
5637                         if (copy_skb == NULL)
5638                                 goto drop_it_no_recycle;
5639
5640                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5641                         skb_put(copy_skb, len);
5642                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5643                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5644                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5645
5646                         /* We'll reuse the original ring buffer. */
5647                         skb = copy_skb;
5648                 }
5649
5650                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5651                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5652                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5653                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5654                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5655                 else
5656                         skb_checksum_none_assert(skb);
5657
5658                 skb->protocol = eth_type_trans(skb, tp->dev);
5659
5660                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5661                     skb->protocol != htons(ETH_P_8021Q)) {
5662                         dev_kfree_skb(skb);
5663                         goto drop_it_no_recycle;
5664                 }
5665
5666                 if (desc->type_flags & RXD_FLAG_VLAN &&
5667                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5668                         __vlan_hwaccel_put_tag(skb,
5669                                                desc->err_vlan & RXD_VLAN_MASK);
5670
5671                 napi_gro_receive(&tnapi->napi, skb);
5672
5673                 received++;
5674                 budget--;
5675
5676 next_pkt:
5677                 (*post_ptr)++;
5678
5679                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5680                         tpr->rx_std_prod_idx = std_prod_idx &
5681                                                tp->rx_std_ring_mask;
5682                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5683                                      tpr->rx_std_prod_idx);
5684                         work_mask &= ~RXD_OPAQUE_RING_STD;
5685                         rx_std_posted = 0;
5686                 }
5687 next_pkt_nopost:
5688                 sw_idx++;
5689                 sw_idx &= tp->rx_ret_ring_mask;
5690
5691                 /* Refresh hw_idx to see if there is new work */
5692                 if (sw_idx == hw_idx) {
5693                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5694                         rmb();
5695                 }
5696         }
5697
5698         /* ACK the status ring. */
5699         tnapi->rx_rcb_ptr = sw_idx;
5700         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5701
5702         /* Refill RX ring(s). */
5703         if (!tg3_flag(tp, ENABLE_RSS)) {
5704                 if (work_mask & RXD_OPAQUE_RING_STD) {
5705                         tpr->rx_std_prod_idx = std_prod_idx &
5706                                                tp->rx_std_ring_mask;
5707                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5708                                      tpr->rx_std_prod_idx);
5709                 }
5710                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5711                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5712                                                tp->rx_jmb_ring_mask;
5713                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5714                                      tpr->rx_jmb_prod_idx);
5715                 }
5716                 mmiowb();
5717         } else if (work_mask) {
5718                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5719                  * updated before the producer indices can be updated.
5720                  */
5721                 smp_wmb();
5722
5723                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5724                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5725
5726                 if (tnapi != &tp->napi[1])
5727                         napi_schedule(&tp->napi[1].napi);
5728         }
5729
5730         return received;
5731 }
5732
5733 static void tg3_poll_link(struct tg3 *tp)
5734 {
5735         /* handle link change and other phy events */
5736         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5737                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5738
5739                 if (sblk->status & SD_STATUS_LINK_CHG) {
5740                         sblk->status = SD_STATUS_UPDATED |
5741                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5742                         spin_lock(&tp->lock);
5743                         if (tg3_flag(tp, USE_PHYLIB)) {
5744                                 tw32_f(MAC_STATUS,
5745                                      (MAC_STATUS_SYNC_CHANGED |
5746                                       MAC_STATUS_CFG_CHANGED |
5747                                       MAC_STATUS_MI_COMPLETION |
5748                                       MAC_STATUS_LNKSTATE_CHANGED));
5749                                 udelay(40);
5750                         } else
5751                                 tg3_setup_phy(tp, 0);
5752                         spin_unlock(&tp->lock);
5753                 }
5754         }
5755 }
5756
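/* Move recycled RX buffers from a source producer ring set (spr) to the
 * destination set (dpr) that actually feeds the hardware.  Used in RSS
 * mode, where each vector recycles buffers into its own ring set.
 * Returns -ENOSPC if a destination slot is still occupied.
 */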
5757 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5758                                 struct tg3_rx_prodring_set *dpr,
5759                                 struct tg3_rx_prodring_set *spr)
5760 {
5761         u32 si, di, cpycnt, src_prod_idx;
5762         int i, err = 0;
5763
5764         while (1) {
5765                 src_prod_idx = spr->rx_std_prod_idx;
5766
5767                 /* Make sure updates to the rx_std_buffers[] entries and the
5768                  * standard producer index are seen in the correct order.
5769                  */
5770                 smp_rmb();
5771
5772                 if (spr->rx_std_cons_idx == src_prod_idx)
5773                         break;
5774
5775                 if (spr->rx_std_cons_idx < src_prod_idx)
5776                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5777                 else
5778                         cpycnt = tp->rx_std_ring_mask + 1 -
5779                                  spr->rx_std_cons_idx;
5780
5781                 cpycnt = min(cpycnt,
5782                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5783
5784                 si = spr->rx_std_cons_idx;
5785                 di = dpr->rx_std_prod_idx;
5786
5787                 for (i = di; i < di + cpycnt; i++) {
5788                         if (dpr->rx_std_buffers[i].skb) {
5789                                 cpycnt = i - di;
5790                                 err = -ENOSPC;
5791                                 break;
5792                         }
5793                 }
5794
5795                 if (!cpycnt)
5796                         break;
5797
5798                 /* Ensure that updates to the rx_std_buffers ring and the
5799                  * shadowed hardware producer ring from tg3_recycle_skb() are
5800                  * ordered correctly WRT the skb check above.
5801                  */
5802                 smp_rmb();
5803
5804                 memcpy(&dpr->rx_std_buffers[di],
5805                        &spr->rx_std_buffers[si],
5806                        cpycnt * sizeof(struct ring_info));
5807
5808                 for (i = 0; i < cpycnt; i++, di++, si++) {
5809                         struct tg3_rx_buffer_desc *sbd, *dbd;
5810                         sbd = &spr->rx_std[si];
5811                         dbd = &dpr->rx_std[di];
5812                         dbd->addr_hi = sbd->addr_hi;
5813                         dbd->addr_lo = sbd->addr_lo;
5814                 }
5815
5816                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5817                                        tp->rx_std_ring_mask;
5818                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5819                                        tp->rx_std_ring_mask;
5820         }
5821
5822         while (1) {
5823                 src_prod_idx = spr->rx_jmb_prod_idx;
5824
5825                 /* Make sure updates to the rx_jmb_buffers[] entries and
5826                  * the jumbo producer index are seen in the correct order.
5827                  */
5828                 smp_rmb();
5829
5830                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5831                         break;
5832
5833                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5834                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5835                 else
5836                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5837                                  spr->rx_jmb_cons_idx;
5838
5839                 cpycnt = min(cpycnt,
5840                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5841
5842                 si = spr->rx_jmb_cons_idx;
5843                 di = dpr->rx_jmb_prod_idx;
5844
5845                 for (i = di; i < di + cpycnt; i++) {
5846                         if (dpr->rx_jmb_buffers[i].skb) {
5847                                 cpycnt = i - di;
5848                                 err = -ENOSPC;
5849                                 break;
5850                         }
5851                 }
5852
5853                 if (!cpycnt)
5854                         break;
5855
5856                 /* Ensure that updates to the rx_jmb_buffers ring and the
5857                  * shadowed hardware producer ring from tg3_recycle_skb() are
5858                  * ordered correctly WRT the skb check above.
5859                  */
5860                 smp_rmb();
5861
5862                 memcpy(&dpr->rx_jmb_buffers[di],
5863                        &spr->rx_jmb_buffers[si],
5864                        cpycnt * sizeof(struct ring_info));
5865
5866                 for (i = 0; i < cpycnt; i++, di++, si++) {
5867                         struct tg3_rx_buffer_desc *sbd, *dbd;
5868                         sbd = &spr->rx_jmb[si].std;
5869                         dbd = &dpr->rx_jmb[di].std;
5870                         dbd->addr_hi = sbd->addr_hi;
5871                         dbd->addr_lo = sbd->addr_lo;
5872                 }
5873
5874                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5875                                        tp->rx_jmb_ring_mask;
5876                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5877                                        tp->rx_jmb_ring_mask;
5878         }
5879
5880         return err;
5881 }
5882
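/* One pass of NAPI work for a single vector: reap TX completions first,
 * then receive packets within the remaining budget.  Under RSS, vector 1
 * additionally redistributes recycled buffers back to the ring set that
 * feeds the hardware and kicks the producer mailboxes.
 */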
5883 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5884 {
5885         struct tg3 *tp = tnapi->tp;
5886
5887         /* run TX completion thread */
5888         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5889                 tg3_tx(tnapi);
5890                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5891                         return work_done;
5892         }
5893
5894         /* run RX thread, within the bounds set by NAPI.
5895          * All RX "locking" is done by ensuring outside
5896          * code synchronizes with tg3->napi.poll()
5897          */
5898         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5899                 work_done += tg3_rx(tnapi, budget - work_done);
5900
5901         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5902                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5903                 int i, err = 0;
5904                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5905                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5906
5907                 for (i = 1; i < tp->irq_cnt; i++)
5908                         err |= tg3_rx_prodring_xfer(tp, dpr,
5909                                                     &tp->napi[i].prodring);
5910
5911                 wmb();
5912
5913                 if (std_prod_idx != dpr->rx_std_prod_idx)
5914                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5915                                      dpr->rx_std_prod_idx);
5916
5917                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5918                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5919                                      dpr->rx_jmb_prod_idx);
5920
5921                 mmiowb();
5922
5923                 if (err)
5924                         tw32_f(HOSTCC_MODE, tp->coal_now);
5925         }
5926
5927         return work_done;
5928 }
5929
5930 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5931 {
5932         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5933         struct tg3 *tp = tnapi->tp;
5934         int work_done = 0;
5935         struct tg3_hw_status *sblk = tnapi->hw_status;
5936
5937         while (1) {
5938                 work_done = tg3_poll_work(tnapi, work_done, budget);
5939
5940                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5941                         goto tx_recovery;
5942
5943                 if (unlikely(work_done >= budget))
5944                         break;
5945
5946                 /* tp->last_tag is used in tg3_int_reenable() below
5947                  * to tell the hw how much work has been processed,
5948                  * so we must read it before checking for more work.
5949                  */
5950                 tnapi->last_tag = sblk->status_tag;
5951                 tnapi->last_irq_tag = tnapi->last_tag;
5952                 rmb();
5953
5954                 /* check for RX/TX work to do */
5955                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5956                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5957                         napi_complete(napi);
5958                         /* Reenable interrupts. */
5959                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5960                         mmiowb();
5961                         break;
5962                 }
5963         }
5964
5965         return work_done;
5966
5967 tx_recovery:
5968         /* work_done is guaranteed to be less than budget. */
5969         napi_complete(napi);
5970         schedule_work(&tp->reset_task);
5971         return work_done;
5972 }
5973
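/* Examine the chip's attention registers once the status block reports
 * SD_STATUS_ERROR.  Known-benign sources (the mbuf low-watermark
 * attention and the MSI request bit) are masked off; anything else
 * dumps chip state and schedules a full reset.
 */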
5974 static void tg3_process_error(struct tg3 *tp)
5975 {
5976         u32 val;
5977         bool real_error = false;
5978
5979         if (tg3_flag(tp, ERROR_PROCESSED))
5980                 return;
5981
5982         /* Check Flow Attention register */
5983         val = tr32(HOSTCC_FLOW_ATTN);
5984         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5985                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5986                 real_error = true;
5987         }
5988
5989         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5990                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5991                 real_error = true;
5992         }
5993
5994         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5995                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5996                 real_error = true;
5997         }
5998
5999         if (!real_error)
6000                 return;
6001
6002         tg3_dump_state(tp);
6003
6004         tg3_flag_set(tp, ERROR_PROCESSED);
6005         schedule_work(&tp->reset_task);
6006 }
6007
6008 static int tg3_poll(struct napi_struct *napi, int budget)
6009 {
6010         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6011         struct tg3 *tp = tnapi->tp;
6012         int work_done = 0;
6013         struct tg3_hw_status *sblk = tnapi->hw_status;
6014
6015         while (1) {
6016                 if (sblk->status & SD_STATUS_ERROR)
6017                         tg3_process_error(tp);
6018
6019                 tg3_poll_link(tp);
6020
6021                 work_done = tg3_poll_work(tnapi, work_done, budget);
6022
6023                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6024                         goto tx_recovery;
6025
6026                 if (unlikely(work_done >= budget))
6027                         break;
6028
6029                 if (tg3_flag(tp, TAGGED_STATUS)) {
6030                         /* tp->last_tag is used in tg3_int_reenable() below
6031                          * to tell the hw how much work has been processed,
6032                          * so we must read it before checking for more work.
6033                          */
6034                         tnapi->last_tag = sblk->status_tag;
6035                         tnapi->last_irq_tag = tnapi->last_tag;
6036                         rmb();
6037                 } else
6038                         sblk->status &= ~SD_STATUS_UPDATED;
6039
6040                 if (likely(!tg3_has_work(tnapi))) {
6041                         napi_complete(napi);
6042                         tg3_int_reenable(tnapi);
6043                         break;
6044                 }
6045         }
6046
6047         return work_done;
6048
6049 tx_recovery:
6050         /* work_done is guaranteed to be less than budget. */
6051         napi_complete(napi);
6052         schedule_work(&tp->reset_task);
6053         return work_done;
6054 }
6055
6056 static void tg3_napi_disable(struct tg3 *tp)
6057 {
6058         int i;
6059
6060         for (i = tp->irq_cnt - 1; i >= 0; i--)
6061                 napi_disable(&tp->napi[i].napi);
6062 }
6063
6064 static void tg3_napi_enable(struct tg3 *tp)
6065 {
6066         int i;
6067
6068         for (i = 0; i < tp->irq_cnt; i++)
6069                 napi_enable(&tp->napi[i].napi);
6070 }
6071
6072 static void tg3_napi_init(struct tg3 *tp)
6073 {
6074         int i;
6075
6076         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6077         for (i = 1; i < tp->irq_cnt; i++)
6078                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6079 }
6080
6081 static void tg3_napi_fini(struct tg3 *tp)
6082 {
6083         int i;
6084
6085         for (i = 0; i < tp->irq_cnt; i++)
6086                 netif_napi_del(&tp->napi[i].napi);
6087 }
6088
6089 static inline void tg3_netif_stop(struct tg3 *tp)
6090 {
6091         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6092         tg3_napi_disable(tp);
6093         netif_tx_disable(tp->dev);
6094 }
6095
6096 static inline void tg3_netif_start(struct tg3 *tp)
6097 {
6098         /* NOTE: unconditional netif_tx_wake_all_queues is only
6099          * appropriate so long as all callers are assured to
6100          * have free tx slots (such as after tg3_init_hw)
6101          */
6102         netif_tx_wake_all_queues(tp->dev);
6103
6104         tg3_napi_enable(tp);
6105         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6106         tg3_enable_ints(tp);
6107 }
6108
6109 static void tg3_irq_quiesce(struct tg3 *tp)
6110 {
6111         int i;
6112
6113         BUG_ON(tp->irq_sync);
6114
6115         tp->irq_sync = 1;
6116         smp_mb();
6117
6118         for (i = 0; i < tp->irq_cnt; i++)
6119                 synchronize_irq(tp->napi[i].irq_vec);
6120 }
6121
6122 /* Fully shut down all tg3 driver activity elsewhere in the system.
6123  * If irq_sync is non-zero, the IRQ handlers must be synchronized
6124  * with as well.  Most of the time this is not necessary, except when
6125  * shutting down the device.
6126  */
6127 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6128 {
6129         spin_lock_bh(&tp->lock);
6130         if (irq_sync)
6131                 tg3_irq_quiesce(tp);
6132 }
6133
6134 static inline void tg3_full_unlock(struct tg3 *tp)
6135 {
6136         spin_unlock_bh(&tp->lock);
6137 }
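
/* Typical usage (a sketch; see e.g. tg3_change_mtu() below):
 *
 *	tg3_full_lock(tp, 1);	quiesce IRQs before reconfiguring
 *	... halt, reprogram and restart the hardware ...
 *	tg3_full_unlock(tp);
 */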
6138
6139 /* One-shot MSI handler - Chip automatically disables interrupt
6140  * after sending MSI so driver doesn't have to do it.
6141  */
6142 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6143 {
6144         struct tg3_napi *tnapi = dev_id;
6145         struct tg3 *tp = tnapi->tp;
6146
6147         prefetch(tnapi->hw_status);
6148         if (tnapi->rx_rcb)
6149                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6150
6151         if (likely(!tg3_irq_sync(tp)))
6152                 napi_schedule(&tnapi->napi);
6153
6154         return IRQ_HANDLED;
6155 }
6156
6157 /* MSI ISR - No need to check for interrupt sharing and no need to
6158  * flush status block and interrupt mailbox. PCI ordering rules
6159  * guarantee that MSI will arrive after the status block.
6160  */
6161 static irqreturn_t tg3_msi(int irq, void *dev_id)
6162 {
6163         struct tg3_napi *tnapi = dev_id;
6164         struct tg3 *tp = tnapi->tp;
6165
6166         prefetch(tnapi->hw_status);
6167         if (tnapi->rx_rcb)
6168                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6169         /*
6170          * Writing any value to intr-mbox-0 clears PCI INTA# and
6171          * chip-internal interrupt pending events.
6172          * Writing non-zero to intr-mbox-0 additionally tells the
6173          * NIC to stop sending us irqs, engaging "in-intr-handler"
6174          * event coalescing.
6175          */
6176         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6177         if (likely(!tg3_irq_sync(tp)))
6178                 napi_schedule(&tnapi->napi);
6179
6180         return IRQ_RETVAL(1);
6181 }
6182
6183 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6184 {
6185         struct tg3_napi *tnapi = dev_id;
6186         struct tg3 *tp = tnapi->tp;
6187         struct tg3_hw_status *sblk = tnapi->hw_status;
6188         unsigned int handled = 1;
6189
6190         /* In INTx mode, it is possible for the interrupt to arrive at
6191          * the CPU before the status block that was posted prior to the interrupt.
6192          * Reading the PCI State register will confirm whether the
6193          * interrupt is ours and will flush the status block.
6194          */
6195         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6196                 if (tg3_flag(tp, CHIP_RESETTING) ||
6197                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6198                         handled = 0;
6199                         goto out;
6200                 }
6201         }
6202
6203         /*
6204          * Writing any value to intr-mbox-0 clears PCI INTA# and
6205          * chip-internal interrupt pending events.
6206          * Writing non-zero to intr-mbox-0 additionally tells the
6207          * NIC to stop sending us irqs, engaging "in-intr-handler"
6208          * event coalescing.
6209          *
6210          * Flush the mailbox to de-assert the IRQ immediately to prevent
6211          * spurious interrupts.  The flush impacts performance but
6212          * excessive spurious interrupts can be worse in some cases.
6213          */
6214         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6215         if (tg3_irq_sync(tp))
6216                 goto out;
6217         sblk->status &= ~SD_STATUS_UPDATED;
6218         if (likely(tg3_has_work(tnapi))) {
6219                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6220                 napi_schedule(&tnapi->napi);
6221         } else {
6222                 /* No work, shared interrupt perhaps?  re-enable
6223                  * interrupts, and flush that PCI write
6224                  */
6225                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6226                                0x00000000);
6227         }
6228 out:
6229         return IRQ_RETVAL(handled);
6230 }
6231
6232 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6233 {
6234         struct tg3_napi *tnapi = dev_id;
6235         struct tg3 *tp = tnapi->tp;
6236         struct tg3_hw_status *sblk = tnapi->hw_status;
6237         unsigned int handled = 1;
6238
6239         /* In INTx mode, it is possible for the interrupt to arrive at
6240          * the CPU before the status block that was posted prior to the interrupt.
6241          * Reading the PCI State register will confirm whether the
6242          * interrupt is ours and will flush the status block.
6243          */
6244         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6245                 if (tg3_flag(tp, CHIP_RESETTING) ||
6246                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6247                         handled = 0;
6248                         goto out;
6249                 }
6250         }
6251
6252         /*
6253          * Writing any value to intr-mbox-0 clears PCI INTA# and
6254          * chip-internal interrupt pending events.
6255          * Writing non-zero to intr-mbox-0 additionally tells the
6256          * NIC to stop sending us irqs, engaging "in-intr-handler"
6257          * event coalescing.
6258          *
6259          * Flush the mailbox to de-assert the IRQ immediately to prevent
6260          * spurious interrupts.  The flush impacts performance but
6261          * excessive spurious interrupts can be worse in some cases.
6262          */
6263         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6264
6265         /*
6266          * In a shared interrupt configuration, sometimes other devices'
6267          * interrupts will scream.  We record the current status tag here
6268          * so that the above check can report that the screaming interrupts
6269          * are unhandled.  Eventually they will be silenced.
6270          */
6271         tnapi->last_irq_tag = sblk->status_tag;
6272
6273         if (tg3_irq_sync(tp))
6274                 goto out;
6275
6276         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6277
6278         napi_schedule(&tnapi->napi);
6279
6280 out:
6281         return IRQ_RETVAL(handled);
6282 }
6283
6284 /* ISR for interrupt test */
6285 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6286 {
6287         struct tg3_napi *tnapi = dev_id;
6288         struct tg3 *tp = tnapi->tp;
6289         struct tg3_hw_status *sblk = tnapi->hw_status;
6290
6291         if ((sblk->status & SD_STATUS_UPDATED) ||
6292             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6293                 tg3_disable_ints(tp);
6294                 return IRQ_RETVAL(1);
6295         }
6296         return IRQ_RETVAL(0);
6297 }
6298
6299 static int tg3_init_hw(struct tg3 *, int);
6300 static int tg3_halt(struct tg3 *, int, int);
6301
6302 /* Restart hardware after configuration changes, self-test, etc.
6303  * Invoked with tp->lock held.
6304  */
6305 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6306         __releases(tp->lock)
6307         __acquires(tp->lock)
6308 {
6309         int err;
6310
6311         err = tg3_init_hw(tp, reset_phy);
6312         if (err) {
6313                 netdev_err(tp->dev,
6314                            "Failed to re-initialize device, aborting\n");
6315                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6316                 tg3_full_unlock(tp);
6317                 del_timer_sync(&tp->timer);
6318                 tp->irq_sync = 0;
6319                 tg3_napi_enable(tp);
6320                 dev_close(tp->dev);
6321                 tg3_full_lock(tp, 0);
6322         }
6323         return err;
6324 }
6325
6326 #ifdef CONFIG_NET_POLL_CONTROLLER
6327 static void tg3_poll_controller(struct net_device *dev)
6328 {
6329         int i;
6330         struct tg3 *tp = netdev_priv(dev);
6331
6332         for (i = 0; i < tp->irq_cnt; i++)
6333                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6334 }
6335 #endif
6336
6337 static void tg3_reset_task(struct work_struct *work)
6338 {
6339         struct tg3 *tp = container_of(work, struct tg3, reset_task);
6340         int err;
6341         unsigned int restart_timer;
6342
6343         tg3_full_lock(tp, 0);
6344
6345         if (!netif_running(tp->dev)) {
6346                 tg3_full_unlock(tp);
6347                 return;
6348         }
6349
6350         tg3_full_unlock(tp);
6351
6352         tg3_phy_stop(tp);
6353
6354         tg3_netif_stop(tp);
6355
6356         tg3_full_lock(tp, 1);
6357
6358         restart_timer = tg3_flag(tp, RESTART_TIMER);
6359         tg3_flag_clear(tp, RESTART_TIMER);
6360
6361         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6362                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6363                 tp->write32_rx_mbox = tg3_write_flush_reg32;
6364                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6365                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6366         }
6367
6368         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6369         err = tg3_init_hw(tp, 1);
6370         if (err)
6371                 goto out;
6372
6373         tg3_netif_start(tp);
6374
6375         if (restart_timer)
6376                 mod_timer(&tp->timer, jiffies + 1);
6377
6378 out:
6379         tg3_full_unlock(tp);
6380
6381         if (!err)
6382                 tg3_phy_start(tp);
6383 }
6384
6385 static void tg3_tx_timeout(struct net_device *dev)
6386 {
6387         struct tg3 *tp = netdev_priv(dev);
6388
6389         if (netif_msg_tx_err(tp)) {
6390                 netdev_err(dev, "transmit timed out, resetting\n");
6391                 tg3_dump_state(tp);
6392         }
6393
6394         schedule_work(&tp->reset_task);
6395 }
6396
6397 /* Test for DMA buffers crossing a 4GB boundary: 4G, 8G, etc. */
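/* A mapping overflows iff its low 32 bits wrap when len plus 8 bytes of
 * slack is added.  The base > 0xffffdcc0 test is a cheap pre-filter:
 * only mappings whose low 32 bits start within ~9KB of a 4GB multiple
 * can wrap, that window presumably being sized for the largest frame
 * this driver handles.
 */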
6398 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6399 {
6400         u32 base = (u32) mapping & 0xffffffff;
6401
6402         return (base > 0xffffdcc0) && (base + len + 8 < base);
6403 }
6404
6405 /* Test for DMA addresses > 40-bit */
6406 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6407                                           int len)
6408 {
6409 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6410         if (tg3_flag(tp, 40BIT_DMA_BUG))
6411                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6412         return 0;
6413 #else
6414         return 0;
6415 #endif
6416 }
6417
6418 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6419                                  dma_addr_t mapping, u32 len, u32 flags,
6420                                  u32 mss, u32 vlan)
6421 {
6422         txbd->addr_hi = ((u64) mapping >> 32);
6423         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6424         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6425         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6426 }
6427
6428 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6429                             dma_addr_t map, u32 len, u32 flags,
6430                             u32 mss, u32 vlan)
6431 {
6432         struct tg3 *tp = tnapi->tp;
6433         bool hwbug = false;
6434
6435         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6436                 hwbug = true;
6437
6438         if (tg3_4g_overflow_test(map, len))
6439                 hwbug = true;
6440
6441         if (tg3_40bit_overflow_test(tp, map, len))
6442                 hwbug = true;
6443
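        /* Chips with a 4K DMA FIFO cannot take more than TG3_TX_BD_DMA_MAX
         * bytes per descriptor, so long fragments are split across several
         * BDs.  The split is skewed where needed so that no chunk ends up
         * 8 bytes or shorter, which would trip the short-DMA erratum above.
         */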
6444         if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6445                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6446                 while (len > TG3_TX_BD_DMA_MAX) {
6447                         u32 frag_len = TG3_TX_BD_DMA_MAX;
6448                         len -= TG3_TX_BD_DMA_MAX;
6449
6450                         if (len) {
6451                                 tnapi->tx_buffers[*entry].fragmented = true;
6452                                 /* Avoid the 8-byte DMA problem */
6453                                 if (len <= 8) {
6454                                         len += TG3_TX_BD_DMA_MAX / 2;
6455                                         frag_len = TG3_TX_BD_DMA_MAX / 2;
6456                                 }
6457                         } else
6458                                 tmp_flag = flags;
6459
6460                         if (*budget) {
6461                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6462                                               frag_len, tmp_flag, mss, vlan);
6463                                 (*budget)--;
6464                                 *entry = NEXT_TX(*entry);
6465                         } else {
6466                                 hwbug = true;
6467                                 break;
6468                         }
6469
6470                         map += frag_len;
6471                 }
6472
6473                 if (len) {
6474                         if (*budget) {
6475                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6476                                               len, flags, mss, vlan);
6477                                 (*budget)--;
6478                                 *entry = NEXT_TX(*entry);
6479                         } else {
6480                                 hwbug = true;
6481                         }
6482                 }
6483         } else {
6484                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6485                               len, flags, mss, vlan);
6486                 *entry = NEXT_TX(*entry);
6487         }
6488
6489         return hwbug;
6490 }
6491
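/* Undo the DMA mappings made for a transmitted skb, starting at 'entry'
 * and following NEXT_TX() through any descriptors that were fragmented
 * for DMA errata.  'last' is the number of page fragments to unmap.
 */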
6492 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6493 {
6494         int i;
6495         struct sk_buff *skb;
6496         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6497
6498         skb = txb->skb;
6499         txb->skb = NULL;
6500
6501         pci_unmap_single(tnapi->tp->pdev,
6502                          dma_unmap_addr(txb, mapping),
6503                          skb_headlen(skb),
6504                          PCI_DMA_TODEVICE);
6505
6506         while (txb->fragmented) {
6507                 txb->fragmented = false;
6508                 entry = NEXT_TX(entry);
6509                 txb = &tnapi->tx_buffers[entry];
6510         }
6511
6512         for (i = 0; i < last; i++) {
6513                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6514
6515                 entry = NEXT_TX(entry);
6516                 txb = &tnapi->tx_buffers[entry];
6517
6518                 pci_unmap_page(tnapi->tp->pdev,
6519                                dma_unmap_addr(txb, mapping),
6520                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6521
6522                 while (txb->fragmented) {
6523                         txb->fragmented = false;
6524                         entry = NEXT_TX(entry);
6525                         txb = &tnapi->tx_buffers[entry];
6526                 }
6527         }
6528 }
6529
6530 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6531 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6532                                        struct sk_buff **pskb,
6533                                        u32 *entry, u32 *budget,
6534                                        u32 base_flags, u32 mss, u32 vlan)
6535 {
6536         struct tg3 *tp = tnapi->tp;
6537         struct sk_buff *new_skb, *skb = *pskb;
6538         dma_addr_t new_addr = 0;
6539         int ret = 0;
6540
6541         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6542                 new_skb = skb_copy(skb, GFP_ATOMIC);
6543         else {
6544                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6545
6546                 new_skb = skb_copy_expand(skb,
6547                                           skb_headroom(skb) + more_headroom,
6548                                           skb_tailroom(skb), GFP_ATOMIC);
6549         }
6550
6551         if (!new_skb) {
6552                 ret = -1;
6553         } else {
6554                 /* New SKB is guaranteed to be linear. */
6555                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6556                                           PCI_DMA_TODEVICE);
6557                 /* Make sure the mapping succeeded */
6558                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6559                         dev_kfree_skb(new_skb);
6560                         ret = -1;
6561                 } else {
6562                         base_flags |= TXD_FLAG_END;
6563
6564                         tnapi->tx_buffers[*entry].skb = new_skb;
6565                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6566                                            mapping, new_addr);
6567
6568                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6569                                             new_skb->len, base_flags,
6570                                             mss, vlan)) {
6571                                 tg3_tx_skb_unmap(tnapi, *entry, 0);
6572                                 dev_kfree_skb(new_skb);
6573                                 ret = -1;
6574                         }
6575                 }
6576         }
6577
6578         dev_kfree_skb(skb);
6579         *pskb = new_skb;
6580         return ret;
6581 }
6582
6583 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6584
6585 /* Use GSO to work around a rare TSO bug that may be triggered when the
6586  * TSO header is greater than 80 bytes.
6587  */
6588 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6589 {
6590         struct sk_buff *segs, *nskb;
6591         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6592
6593         /* Estimate the number of fragments in the worst case */
6594         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6595                 netif_stop_queue(tp->dev);
6596
6597                 /* netif_tx_stop_queue() must be done before checking
6598                  * the tx index in tg3_tx_avail() below, because in
6599                  * tg3_tx(), we update tx index before checking for
6600                  * netif_tx_queue_stopped().
6601                  */
6602                 smp_mb();
6603                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6604                         return NETDEV_TX_BUSY;
6605
6606                 netif_wake_queue(tp->dev);
6607         }
6608
6609         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6610         if (IS_ERR(segs))
6611                 goto tg3_tso_bug_end;
6612
6613         do {
6614                 nskb = segs;
6615                 segs = segs->next;
6616                 nskb->next = NULL;
6617                 tg3_start_xmit(nskb, tp->dev);
6618         } while (segs);
6619
6620 tg3_tso_bug_end:
6621         dev_kfree_skb(skb);
6622
6623         return NETDEV_TX_OK;
6624 }
6625
6626 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6627  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6628  */
6629 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6630 {
6631         struct tg3 *tp = netdev_priv(dev);
6632         u32 len, entry, base_flags, mss, vlan = 0;
6633         u32 budget;
6634         int i = -1, would_hit_hwbug;
6635         dma_addr_t mapping;
6636         struct tg3_napi *tnapi;
6637         struct netdev_queue *txq;
6638         unsigned int last;
6639
6640         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6641         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6642         if (tg3_flag(tp, ENABLE_TSS))
6643                 tnapi++;
6644
6645         budget = tg3_tx_avail(tnapi);
6646
6647         /* We are running in BH disabled context with netif_tx_lock
6648          * and TX reclaim runs via tp->napi.poll inside of a software
6649          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6650          * no IRQ context deadlocks to worry about either.  Rejoice!
6651          */
6652         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6653                 if (!netif_tx_queue_stopped(txq)) {
6654                         netif_tx_stop_queue(txq);
6655
6656                         /* This is a hard error, log it. */
6657                         netdev_err(dev,
6658                                    "BUG! Tx Ring full when queue awake!\n");
6659                 }
6660                 return NETDEV_TX_BUSY;
6661         }
6662
6663         entry = tnapi->tx_prod;
6664         base_flags = 0;
6665         if (skb->ip_summed == CHECKSUM_PARTIAL)
6666                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6667
6668         mss = skb_shinfo(skb)->gso_size;
6669         if (mss) {
6670                 struct iphdr *iph;
6671                 u32 tcp_opt_len, hdr_len;
6672
6673                 if (skb_header_cloned(skb) &&
6674                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6675                         dev_kfree_skb(skb);
6676                         goto out_unlock;
6677                 }
6678
6679                 iph = ip_hdr(skb);
6680                 tcp_opt_len = tcp_optlen(skb);
6681
6682                 if (skb_is_gso_v6(skb)) {
6683                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6684                 } else {
6685                         u32 ip_tcp_len;
6686
6687                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6688                         hdr_len = ip_tcp_len + tcp_opt_len;
6689
6690                         iph->check = 0;
6691                         iph->tot_len = htons(mss + hdr_len);
6692                 }
6693
6694                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6695                     tg3_flag(tp, TSO_BUG))
6696                         return tg3_tso_bug(tp, skb);
6697
6698                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6699                                TXD_FLAG_CPU_POST_DMA);
6700
6701                 if (tg3_flag(tp, HW_TSO_1) ||
6702                     tg3_flag(tp, HW_TSO_2) ||
6703                     tg3_flag(tp, HW_TSO_3)) {
6704                         tcp_hdr(skb)->check = 0;
6705                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6706                 } else
6707                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6708                                                                  iph->daddr, 0,
6709                                                                  IPPROTO_TCP,
6710                                                                  0);
6711
6712                 if (tg3_flag(tp, HW_TSO_3)) {
6713                         mss |= (hdr_len & 0xc) << 12;
6714                         if (hdr_len & 0x10)
6715                                 base_flags |= 0x00000010;
6716                         base_flags |= (hdr_len & 0x3e0) << 5;
6717                 } else if (tg3_flag(tp, HW_TSO_2))
6718                         mss |= hdr_len << 9;
6719                 else if (tg3_flag(tp, HW_TSO_1) ||
6720                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6721                         if (tcp_opt_len || iph->ihl > 5) {
6722                                 int tsflags;
6723
6724                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6725                                 mss |= (tsflags << 11);
6726                         }
6727                 } else {
6728                         if (tcp_opt_len || iph->ihl > 5) {
6729                                 int tsflags;
6730
6731                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6732                                 base_flags |= tsflags << 12;
6733                         }
6734                 }
6735         }
6736
6737         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6738             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6739                 base_flags |= TXD_FLAG_JMB_PKT;
6740
6741         if (vlan_tx_tag_present(skb)) {
6742                 base_flags |= TXD_FLAG_VLAN;
6743                 vlan = vlan_tx_tag_get(skb);
6744         }
6745
6746         len = skb_headlen(skb);
6747
6748         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6749         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6750                 dev_kfree_skb(skb);
6751                 goto out_unlock;
6752         }
6753
6754         tnapi->tx_buffers[entry].skb = skb;
6755         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6756
6757         would_hit_hwbug = 0;
6758
6759         if (tg3_flag(tp, 5701_DMA_BUG))
6760                 would_hit_hwbug = 1;
6761
6762         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6763                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6764                             mss, vlan))
6765                 would_hit_hwbug = 1;
6766
6767         /* Now loop through additional data fragments, and queue them. */
6768         if (skb_shinfo(skb)->nr_frags > 0) {
6769                 u32 tmp_mss = mss;
6770
6771                 if (!tg3_flag(tp, HW_TSO_1) &&
6772                     !tg3_flag(tp, HW_TSO_2) &&
6773                     !tg3_flag(tp, HW_TSO_3))
6774                         tmp_mss = 0;
6775
6776                 last = skb_shinfo(skb)->nr_frags - 1;
6777                 for (i = 0; i <= last; i++) {
6778                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6779
6780                         len = skb_frag_size(frag);
6781                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6782                                                    len, DMA_TO_DEVICE);
6783
6784                         tnapi->tx_buffers[entry].skb = NULL;
6785                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6786                                            mapping);
6787                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6788                                 goto dma_error;
6789
6790                         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6791                                             len, base_flags |
6792                                             ((i == last) ? TXD_FLAG_END : 0),
6793                                             tmp_mss, vlan))
6794                                 would_hit_hwbug = 1;
6795                 }
6796         }
6797
6798         if (would_hit_hwbug) {
6799                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6800
6801                 /* If the workaround fails due to memory/mapping
6802                  * failure, silently drop this packet.
6803                  */
6804                 entry = tnapi->tx_prod;
6805                 budget = tg3_tx_avail(tnapi);
6806                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6807                                                 base_flags, mss, vlan))
6808                         goto out_unlock;
6809         }
6810
6811         skb_tx_timestamp(skb);
6812
6813         /* Packets are ready, update Tx producer idx local and on card. */
6814         tw32_tx_mbox(tnapi->prodmbox, entry);
6815
6816         tnapi->tx_prod = entry;
6817         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6818                 netif_tx_stop_queue(txq);
6819
6820                 /* netif_tx_stop_queue() must be done before checking
6821                  * the tx index in tg3_tx_avail() below, because in
6822                  * tg3_tx(), we update tx index before checking for
6823                  * netif_tx_queue_stopped().
6824                  */
6825                 smp_mb();
6826                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6827                         netif_tx_wake_queue(txq);
6828         }
6829
6830 out_unlock:
6831         mmiowb();
6832
6833         return NETDEV_TX_OK;
6834
6835 dma_error:
6836         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6837         dev_kfree_skb(skb);
6838         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6839         return NETDEV_TX_OK;
6840 }
6841
6842 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6843 {
6844         if (enable) {
6845                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6846                                   MAC_MODE_PORT_MODE_MASK);
6847
6848                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6849
6850                 if (!tg3_flag(tp, 5705_PLUS))
6851                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6852
6853                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6854                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6855                 else
6856                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6857         } else {
6858                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6859
6860                 if (tg3_flag(tp, 5705_PLUS) ||
6861                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6862                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6863                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6864         }
6865
6866         tw32(MAC_MODE, tp->mac_mode);
6867         udelay(40);
6868 }
6869
6870 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6871 {
6872         u32 val, bmcr, mac_mode, ptest = 0;
6873
6874         tg3_phy_toggle_apd(tp, false);
6875         tg3_phy_toggle_automdix(tp, 0);
6876
6877         if (extlpbk && tg3_phy_set_extloopbk(tp))
6878                 return -EIO;
6879
6880         bmcr = BMCR_FULLDPLX;
6881         switch (speed) {
6882         case SPEED_10:
6883                 break;
6884         case SPEED_100:
6885                 bmcr |= BMCR_SPEED100;
6886                 break;
6887         case SPEED_1000:
6888         default:
6889                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6890                         speed = SPEED_100;
6891                         bmcr |= BMCR_SPEED100;
6892                 } else {
6893                         speed = SPEED_1000;
6894                         bmcr |= BMCR_SPEED1000;
6895                 }
6896         }
6897
6898         if (extlpbk) {
6899                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6900                         tg3_readphy(tp, MII_CTRL1000, &val);
6901                         val |= CTL1000_AS_MASTER |
6902                                CTL1000_ENABLE_MASTER;
6903                         tg3_writephy(tp, MII_CTRL1000, val);
6904                 } else {
6905                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6906                                 MII_TG3_FET_PTEST_TRIM_2;
6907                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6908                 }
6909         } else
6910                 bmcr |= BMCR_LOOPBACK;
6911
6912         tg3_writephy(tp, MII_BMCR, bmcr);
6913
6914         /* The write needs to be flushed for the FETs */
6915         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6916                 tg3_readphy(tp, MII_BMCR, &bmcr);
6917
6918         udelay(40);
6919
6920         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6921             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6922                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6923                              MII_TG3_FET_PTEST_FRC_TX_LINK |
6924                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
6925
6926                 /* The write needs to be flushed for the AC131 */
6927                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6928         }
6929
6930         /* Reset to prevent losing 1st rx packet intermittently */
6931         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6932             tg3_flag(tp, 5780_CLASS)) {
6933                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6934                 udelay(10);
6935                 tw32_f(MAC_RX_MODE, tp->rx_mode);
6936         }
6937
6938         mac_mode = tp->mac_mode &
6939                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6940         if (speed == SPEED_1000)
6941                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6942         else
6943                 mac_mode |= MAC_MODE_PORT_MODE_MII;
6944
6945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6946                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6947
6948                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6949                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
6950                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6951                         mac_mode |= MAC_MODE_LINK_POLARITY;
6952
6953                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6954                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6955         }
6956
6957         tw32(MAC_MODE, mac_mode);
6958         udelay(40);
6959
6960         return 0;
6961 }
6962
6963 static void tg3_set_loopback(struct net_device *dev, u32 features)
6964 {
6965         struct tg3 *tp = netdev_priv(dev);
6966
6967         if (features & NETIF_F_LOOPBACK) {
6968                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6969                         return;
6970
6971                 spin_lock_bh(&tp->lock);
6972                 tg3_mac_loopback(tp, true);
6973                 netif_carrier_on(tp->dev);
6974                 spin_unlock_bh(&tp->lock);
6975                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6976         } else {
6977                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6978                         return;
6979
6980                 spin_lock_bh(&tp->lock);
6981                 tg3_mac_loopback(tp, false);
6982                 /* Force link status check */
6983                 tg3_setup_phy(tp, 1);
6984                 spin_unlock_bh(&tp->lock);
6985                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6986         }
6987 }
6988
6989 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6990 {
6991         struct tg3 *tp = netdev_priv(dev);
6992
6993         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6994                 features &= ~NETIF_F_ALL_TSO;
6995
6996         return features;
6997 }
6998
6999 static int tg3_set_features(struct net_device *dev, u32 features)
7000 {
7001         u32 changed = dev->features ^ features;
7002
7003         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7004                 tg3_set_loopback(dev, features);
7005
7006         return 0;
7007 }
7008
7009 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7010                                int new_mtu)
7011 {
7012         dev->mtu = new_mtu;
7013
7014         if (new_mtu > ETH_DATA_LEN) {
7015                 if (tg3_flag(tp, 5780_CLASS)) {
7016                         netdev_update_features(dev);
7017                         tg3_flag_clear(tp, TSO_CAPABLE);
7018                 } else {
7019                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
7020                 }
7021         } else {
7022                 if (tg3_flag(tp, 5780_CLASS)) {
7023                         tg3_flag_set(tp, TSO_CAPABLE);
7024                         netdev_update_features(dev);
7025                 }
7026                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7027         }
7028 }
7029
7030 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7031 {
7032         struct tg3 *tp = netdev_priv(dev);
7033         int err;
7034
7035         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7036                 return -EINVAL;
7037
7038         if (!netif_running(dev)) {
7039                 /* We'll just catch it later when the
7040          * device is brought up.
7041                  */
7042                 tg3_set_mtu(dev, tp, new_mtu);
7043                 return 0;
7044         }
7045
7046         tg3_phy_stop(tp);
7047
7048         tg3_netif_stop(tp);
7049
7050         tg3_full_lock(tp, 1);
7051
7052         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7053
7054         tg3_set_mtu(dev, tp, new_mtu);
7055
7056         err = tg3_restart_hw(tp, 0);
7057
7058         if (!err)
7059                 tg3_netif_start(tp);
7060
7061         tg3_full_unlock(tp);
7062
7063         if (!err)
7064                 tg3_phy_start(tp);
7065
7066         return err;
7067 }
7068
7069 static void tg3_rx_prodring_free(struct tg3 *tp,
7070                                  struct tg3_rx_prodring_set *tpr)
7071 {
7072         int i;
7073
7074         if (tpr != &tp->napi[0].prodring) {
7075                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7076                      i = (i + 1) & tp->rx_std_ring_mask)
7077                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7078                                         tp->rx_pkt_map_sz);
7079
7080                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7081                         for (i = tpr->rx_jmb_cons_idx;
7082                              i != tpr->rx_jmb_prod_idx;
7083                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7084                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7085                                                 TG3_RX_JMB_MAP_SZ);
7086                         }
7087                 }
7088
7089                 return;
7090         }
7091
7092         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7093                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7094                                 tp->rx_pkt_map_sz);
7095
7096         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7097                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7098                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7099                                         TG3_RX_JMB_MAP_SZ);
7100         }
7101 }
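
/* A minimal sketch of the index arithmetic used above, assuming only
 * that the producer rings are power-of-two sized (tg3_ring_next_idx
 * is an illustrative name, not a driver symbol): advancing an index
 * is a mask, never a modulo, and walking from the consumer index up
 * to the producer index visits exactly the still-posted slots.
 */
static inline u32 tg3_ring_next_idx(u32 idx, u32 ring_mask)
{
        /* ring_mask is (number of ring entries - 1) */
        return (idx + 1) & ring_mask;
}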
7102
7103 /* Initialize rx rings for packet processing.
7104  *
7105  * The chip has been shut down and the driver detached from
7106  * the networking stack, so no interrupts or new tx packets will
7107  * end up in the driver.  tp->{tx,}lock are held and thus
7108  * we may not sleep.
7109  */
7110 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7111                                  struct tg3_rx_prodring_set *tpr)
7112 {
7113         u32 i, rx_pkt_dma_sz;
7114
7115         tpr->rx_std_cons_idx = 0;
7116         tpr->rx_std_prod_idx = 0;
7117         tpr->rx_jmb_cons_idx = 0;
7118         tpr->rx_jmb_prod_idx = 0;
7119
7120         if (tpr != &tp->napi[0].prodring) {
7121                 memset(&tpr->rx_std_buffers[0], 0,
7122                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7123                 if (tpr->rx_jmb_buffers)
7124                         memset(&tpr->rx_jmb_buffers[0], 0,
7125                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7126                 goto done;
7127         }
7128
7129         /* Zero out all descriptors. */
7130         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7131
7132         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7133         if (tg3_flag(tp, 5780_CLASS) &&
7134             tp->dev->mtu > ETH_DATA_LEN)
7135                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7136         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7137
7138         /* Initialize invariants of the rings; we only set this
7139          * stuff once.  This works because the card does not
7140          * write into the rx buffer posting rings.
7141          */
7142         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7143                 struct tg3_rx_buffer_desc *rxd;
7144
7145                 rxd = &tpr->rx_std[i];
7146                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7147                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7148                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7149                                (i << RXD_OPAQUE_INDEX_SHIFT));
7150         }
7151
7152         /* Now allocate fresh SKBs for each rx ring. */
7153         for (i = 0; i < tp->rx_pending; i++) {
7154                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7155                         netdev_warn(tp->dev,
7156                                     "Using a smaller RX standard ring. Only "
7157                                     "%d out of %d buffers were allocated "
7158                                     "successfully\n", i, tp->rx_pending);
7159                         if (i == 0)
7160                                 goto initfail;
7161                         tp->rx_pending = i;
7162                         break;
7163                 }
7164         }
7165
7166         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7167                 goto done;
7168
7169         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7170
7171         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7172                 goto done;
7173
7174         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7175                 struct tg3_rx_buffer_desc *rxd;
7176
7177                 rxd = &tpr->rx_jmb[i].std;
7178                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7179                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7180                                   RXD_FLAG_JUMBO;
7181                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7182                        (i << RXD_OPAQUE_INDEX_SHIFT));
7183         }
7184
7185         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7186                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7187                         netdev_warn(tp->dev,
7188                                     "Using a smaller RX jumbo ring. Only %d "
7189                                     "out of %d buffers were allocated "
7190                                     "successfully\n", i, tp->rx_jumbo_pending);
7191                         if (i == 0)
7192                                 goto initfail;
7193                         tp->rx_jumbo_pending = i;
7194                         break;
7195                 }
7196         }
7197
7198 done:
7199         return 0;
7200
7201 initfail:
7202         tg3_rx_prodring_free(tp, tpr);
7203         return -ENOMEM;
7204 }
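
/* A minimal sketch of how the opaque cookie built above is consumed,
 * assuming the RXD_OPAQUE_* definitions in tg3.h
 * (tg3_rxd_opaque_to_idx is an illustrative name, not a driver
 * symbol).  The cookie packs the ring identity and the slot index so
 * the completion path can locate the matching software buffer with a
 * mask instead of a search.
 */
static inline u32 tg3_rxd_opaque_to_idx(u32 opaque)
{
        return (opaque & RXD_OPAQUE_INDEX_MASK) >> RXD_OPAQUE_INDEX_SHIFT;
}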
7205
7206 static void tg3_rx_prodring_fini(struct tg3 *tp,
7207                                  struct tg3_rx_prodring_set *tpr)
7208 {
7209         kfree(tpr->rx_std_buffers);
7210         tpr->rx_std_buffers = NULL;
7211         kfree(tpr->rx_jmb_buffers);
7212         tpr->rx_jmb_buffers = NULL;
7213         if (tpr->rx_std) {
7214                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7215                                   tpr->rx_std, tpr->rx_std_mapping);
7216                 tpr->rx_std = NULL;
7217         }
7218         if (tpr->rx_jmb) {
7219                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7220                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7221                 tpr->rx_jmb = NULL;
7222         }
7223 }
7224
7225 static int tg3_rx_prodring_init(struct tg3 *tp,
7226                                 struct tg3_rx_prodring_set *tpr)
7227 {
7228         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7229                                       GFP_KERNEL);
7230         if (!tpr->rx_std_buffers)
7231                 return -ENOMEM;
7232
7233         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7234                                          TG3_RX_STD_RING_BYTES(tp),
7235                                          &tpr->rx_std_mapping,
7236                                          GFP_KERNEL);
7237         if (!tpr->rx_std)
7238                 goto err_out;
7239
7240         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7241                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7242                                               GFP_KERNEL);
7243                 if (!tpr->rx_jmb_buffers)
7244                         goto err_out;
7245
7246                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7247                                                  TG3_RX_JMB_RING_BYTES(tp),
7248                                                  &tpr->rx_jmb_mapping,
7249                                                  GFP_KERNEL);
7250                 if (!tpr->rx_jmb)
7251                         goto err_out;
7252         }
7253
7254         return 0;
7255
7256 err_out:
7257         tg3_rx_prodring_fini(tp, tpr);
7258         return -ENOMEM;
7259 }
7260
7261 /* Free up pending packets in all rx/tx rings.
7262  *
7263  * The chip has been shut down and the driver detached from
7264  * the networking stack, so no interrupts or new tx packets will
7265  * end up in the driver.  tp->{tx,}lock is not held and we are not
7266  * in an interrupt context and thus may sleep.
7267  */
7268 static void tg3_free_rings(struct tg3 *tp)
7269 {
7270         int i, j;
7271
7272         for (j = 0; j < tp->irq_cnt; j++) {
7273                 struct tg3_napi *tnapi = &tp->napi[j];
7274
7275                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7276
7277                 if (!tnapi->tx_buffers)
7278                         continue;
7279
7280                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7281                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7282
7283                         if (!skb)
7284                                 continue;
7285
7286                         tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
7287
7288                         dev_kfree_skb_any(skb);
7289                 }
7290         }
7291 }
7292
7293 /* Initialize tx/rx rings for packet processing.
7294  *
7295  * The chip has been shut down and the driver detached from
7296  * the networking stack, so no interrupts or new tx packets will
7297  * end up in the driver.  tp->{tx,}lock are held and thus
7298  * we may not sleep.
7299  */
7300 static int tg3_init_rings(struct tg3 *tp)
7301 {
7302         int i;
7303
7304         /* Free up all the SKBs. */
7305         tg3_free_rings(tp);
7306
7307         for (i = 0; i < tp->irq_cnt; i++) {
7308                 struct tg3_napi *tnapi = &tp->napi[i];
7309
7310                 tnapi->last_tag = 0;
7311                 tnapi->last_irq_tag = 0;
7312                 tnapi->hw_status->status = 0;
7313                 tnapi->hw_status->status_tag = 0;
7314                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7315
7316                 tnapi->tx_prod = 0;
7317                 tnapi->tx_cons = 0;
7318                 if (tnapi->tx_ring)
7319                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7320
7321                 tnapi->rx_rcb_ptr = 0;
7322                 if (tnapi->rx_rcb)
7323                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7324
7325                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7326                         tg3_free_rings(tp);
7327                         return -ENOMEM;
7328                 }
7329         }
7330
7331         return 0;
7332 }
7333
7334 /*
7335  * Must not be invoked with interrupt sources disabled and
7336  * the hardware shut down.
7337  */
7338 static void tg3_free_consistent(struct tg3 *tp)
7339 {
7340         int i;
7341
7342         for (i = 0; i < tp->irq_cnt; i++) {
7343                 struct tg3_napi *tnapi = &tp->napi[i];
7344
7345                 if (tnapi->tx_ring) {
7346                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7347                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7348                         tnapi->tx_ring = NULL;
7349                 }
7350
7351                 kfree(tnapi->tx_buffers);
7352                 tnapi->tx_buffers = NULL;
7353
7354                 if (tnapi->rx_rcb) {
7355                         dma_free_coherent(&tp->pdev->dev,
7356                                           TG3_RX_RCB_RING_BYTES(tp),
7357                                           tnapi->rx_rcb,
7358                                           tnapi->rx_rcb_mapping);
7359                         tnapi->rx_rcb = NULL;
7360                 }
7361
7362                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7363
7364                 if (tnapi->hw_status) {
7365                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7366                                           tnapi->hw_status,
7367                                           tnapi->status_mapping);
7368                         tnapi->hw_status = NULL;
7369                 }
7370         }
7371
7372         if (tp->hw_stats) {
7373                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7374                                   tp->hw_stats, tp->stats_mapping);
7375                 tp->hw_stats = NULL;
7376         }
7377 }
7378
7379 /*
7380  * Must not be invoked with interrupt sources disabled and
7381  * the hardware shut down.  Can sleep.
7382  */
7383 static int tg3_alloc_consistent(struct tg3 *tp)
7384 {
7385         int i;
7386
7387         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7388                                           sizeof(struct tg3_hw_stats),
7389                                           &tp->stats_mapping,
7390                                           GFP_KERNEL);
7391         if (!tp->hw_stats)
7392                 goto err_out;
7393
7394         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7395
7396         for (i = 0; i < tp->irq_cnt; i++) {
7397                 struct tg3_napi *tnapi = &tp->napi[i];
7398                 struct tg3_hw_status *sblk;
7399
7400                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7401                                                       TG3_HW_STATUS_SIZE,
7402                                                       &tnapi->status_mapping,
7403                                                       GFP_KERNEL);
7404                 if (!tnapi->hw_status)
7405                         goto err_out;
7406
7407                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7408                 sblk = tnapi->hw_status;
7409
7410                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7411                         goto err_out;
7412
7413                 /* If multivector TSS is enabled, vector 0 does not handle
7414                  * tx interrupts.  Don't allocate any resources for it.
7415                  */
7416                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7417                     (i && tg3_flag(tp, ENABLE_TSS))) {
7418                         tnapi->tx_buffers = kzalloc(
7419                                                sizeof(struct tg3_tx_ring_info) *
7420                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7421                         if (!tnapi->tx_buffers)
7422                                 goto err_out;
7423
7424                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7425                                                             TG3_TX_RING_BYTES,
7426                                                         &tnapi->tx_desc_mapping,
7427                                                             GFP_KERNEL);
7428                         if (!tnapi->tx_ring)
7429                                 goto err_out;
7430                 }
7431
7432                 /*
7433                  * When RSS is enabled, the status block format changes
7434                  * slightly.  The "rx_jumbo_consumer", "reserved",
7435                  * and "rx_mini_consumer" members get mapped to the
7436                  * other three rx return ring producer indexes.
7437                  */
7438                 switch (i) {
7439                 default:
7440                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7441                         break;
7442                 case 2:
7443                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7444                         break;
7445                 case 3:
7446                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7447                         break;
7448                 case 4:
7449                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7450                         break;
7451                 }
7452
7453                 /*
7454                  * If multivector RSS is enabled, vector 0 does not handle
7455                  * rx or tx interrupts.  Don't allocate any resources for it.
7456                  */
7457                 if (!i && tg3_flag(tp, ENABLE_RSS))
7458                         continue;
7459
7460                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7461                                                    TG3_RX_RCB_RING_BYTES(tp),
7462                                                    &tnapi->rx_rcb_mapping,
7463                                                    GFP_KERNEL);
7464                 if (!tnapi->rx_rcb)
7465                         goto err_out;
7466
7467                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7468         }
7469
7470         return 0;
7471
7472 err_out:
7473         tg3_free_consistent(tp);
7474         return -ENOMEM;
7475 }
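
/* For reference, the status block index mapping established by the
 * switch above when RSS repurposes the legacy fields:
 *   vector 1 -> sblk->idx[0].rx_producer   (default case)
 *   vector 2 -> sblk->rx_jumbo_consumer
 *   vector 3 -> sblk->reserved
 *   vector 4 -> sblk->rx_mini_consumer
 */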
7476
7477 #define MAX_WAIT_CNT 1000
7478
7479 /* To stop a block, clear the enable bit and poll till it
7480  * clears.  tp->lock is held.
7481  */
7482 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7483 {
7484         unsigned int i;
7485         u32 val;
7486
7487         if (tg3_flag(tp, 5705_PLUS)) {
7488                 switch (ofs) {
7489                 case RCVLSC_MODE:
7490                 case DMAC_MODE:
7491                 case MBFREE_MODE:
7492                 case BUFMGR_MODE:
7493                 case MEMARB_MODE:
7494                         /* We can't enable/disable these bits of the
7495                          * 5705/5750; just report success.
7496                          */
7497                         return 0;
7498
7499                 default:
7500                         break;
7501                 }
7502         }
7503
7504         val = tr32(ofs);
7505         val &= ~enable_bit;
7506         tw32_f(ofs, val);
7507
7508         for (i = 0; i < MAX_WAIT_CNT; i++) {
7509                 udelay(100);
7510                 val = tr32(ofs);
7511                 if ((val & enable_bit) == 0)
7512                         break;
7513         }
7514
7515         if (i == MAX_WAIT_CNT && !silent) {
7516                 dev_err(&tp->pdev->dev,
7517                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7518                         ofs, enable_bit);
7519                 return -ENODEV;
7520         }
7521
7522         return 0;
7523 }
7524
7525 /* tp->lock is held. */
7526 static int tg3_abort_hw(struct tg3 *tp, int silent)
7527 {
7528         int i, err;
7529
7530         tg3_disable_ints(tp);
7531
7532         tp->rx_mode &= ~RX_MODE_ENABLE;
7533         tw32_f(MAC_RX_MODE, tp->rx_mode);
7534         udelay(10);
7535
7536         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7537         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7538         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7539         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7540         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7541         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7542
7543         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7544         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7545         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7546         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7547         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7548         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7549         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7550
7551         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7552         tw32_f(MAC_MODE, tp->mac_mode);
7553         udelay(40);
7554
7555         tp->tx_mode &= ~TX_MODE_ENABLE;
7556         tw32_f(MAC_TX_MODE, tp->tx_mode);
7557
7558         for (i = 0; i < MAX_WAIT_CNT; i++) {
7559                 udelay(100);
7560                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7561                         break;
7562         }
7563         if (i >= MAX_WAIT_CNT) {
7564                 dev_err(&tp->pdev->dev,
7565                         "%s timed out, TX_MODE_ENABLE will not clear "
7566                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7567                 err |= -ENODEV;
7568         }
7569
7570         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7571         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7572         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7573
7574         tw32(FTQ_RESET, 0xffffffff);
7575         tw32(FTQ_RESET, 0x00000000);
7576
7577         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7578         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7579
7580         for (i = 0; i < tp->irq_cnt; i++) {
7581                 struct tg3_napi *tnapi = &tp->napi[i];
7582                 if (tnapi->hw_status)
7583                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7584         }
7585         if (tp->hw_stats)
7586                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7587
7588         return err;
7589 }
7590
7591 /* Save PCI command register before chip reset */
7592 static void tg3_save_pci_state(struct tg3 *tp)
7593 {
7594         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7595 }
7596
7597 /* Restore PCI state after chip reset */
7598 static void tg3_restore_pci_state(struct tg3 *tp)
7599 {
7600         u32 val;
7601
7602         /* Re-enable indirect register accesses. */
7603         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7604                                tp->misc_host_ctrl);
7605
7606         /* Set MAX PCI retry to zero. */
7607         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7608         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7609             tg3_flag(tp, PCIX_MODE))
7610                 val |= PCISTATE_RETRY_SAME_DMA;
7611         /* Allow reads and writes to the APE register and memory space. */
7612         if (tg3_flag(tp, ENABLE_APE))
7613                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7614                        PCISTATE_ALLOW_APE_SHMEM_WR |
7615                        PCISTATE_ALLOW_APE_PSPACE_WR;
7616         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7617
7618         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7619
7620         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7621                 if (tg3_flag(tp, PCI_EXPRESS))
7622                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7623                 else {
7624                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7625                                               tp->pci_cacheline_sz);
7626                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7627                                               tp->pci_lat_timer);
7628                 }
7629         }
7630
7631         /* Make sure PCI-X relaxed ordering bit is clear. */
7632         if (tg3_flag(tp, PCIX_MODE)) {
7633                 u16 pcix_cmd;
7634
7635                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7636                                      &pcix_cmd);
7637                 pcix_cmd &= ~PCI_X_CMD_ERO;
7638                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7639                                       pcix_cmd);
7640         }
7641
7642         if (tg3_flag(tp, 5780_CLASS)) {
7643
7644                 /* Chip reset on the 5780 will clear the MSI enable bit,
7645                  * so we need to restore it.
7646                  */
7647                 if (tg3_flag(tp, USING_MSI)) {
7648                         u16 ctrl;
7649
7650                         pci_read_config_word(tp->pdev,
7651                                              tp->msi_cap + PCI_MSI_FLAGS,
7652                                              &ctrl);
7653                         pci_write_config_word(tp->pdev,
7654                                               tp->msi_cap + PCI_MSI_FLAGS,
7655                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7656                         val = tr32(MSGINT_MODE);
7657                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7658                 }
7659         }
7660 }
7661
7662 /* tp->lock is held. */
7663 static int tg3_chip_reset(struct tg3 *tp)
7664 {
7665         u32 val;
7666         void (*write_op)(struct tg3 *, u32, u32);
7667         int i, err;
7668
7669         tg3_nvram_lock(tp);
7670
7671         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7672
7673         /* No matching tg3_nvram_unlock() after this because
7674          * chip reset below will undo the nvram lock.
7675          */
7676         tp->nvram_lock_cnt = 0;
7677
7678         /* GRC_MISC_CFG core clock reset will clear the memory
7679          * enable bit in PCI register 4 and the MSI enable bit
7680          * on some chips, so we save relevant registers here.
7681          */
7682         tg3_save_pci_state(tp);
7683
7684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7685             tg3_flag(tp, 5755_PLUS))
7686                 tw32(GRC_FASTBOOT_PC, 0);
7687
7688         /*
7689          * We must avoid the readl() that normally takes place.
7690          * It locks up machines, causes machine checks, and other
7691          * fun things.  So we temporarily disable the 5701
7692          * hardware workaround while we do the reset.
7693          */
7694         write_op = tp->write32;
7695         if (write_op == tg3_write_flush_reg32)
7696                 tp->write32 = tg3_write32;
7697
7698         /* Prevent the irq handler from reading or writing PCI registers
7699          * during chip reset when the memory enable bit in the PCI command
7700          * register may be cleared.  The chip does not generate interrupts
7701          * at this time, but the irq handler may still be called due to irq
7702          * sharing or irqpoll.
7703          */
7704         tg3_flag_set(tp, CHIP_RESETTING);
7705         for (i = 0; i < tp->irq_cnt; i++) {
7706                 struct tg3_napi *tnapi = &tp->napi[i];
7707                 if (tnapi->hw_status) {
7708                         tnapi->hw_status->status = 0;
7709                         tnapi->hw_status->status_tag = 0;
7710                 }
7711                 tnapi->last_tag = 0;
7712                 tnapi->last_irq_tag = 0;
7713         }
7714         smp_mb();
7715
7716         for (i = 0; i < tp->irq_cnt; i++)
7717                 synchronize_irq(tp->napi[i].irq_vec);
7718
7719         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7720                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7721                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7722         }
7723
7724         /* do the reset */
7725         val = GRC_MISC_CFG_CORECLK_RESET;
7726
7727         if (tg3_flag(tp, PCI_EXPRESS)) {
7728                 /* Force PCIe 1.0a mode */
7729                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7730                     !tg3_flag(tp, 57765_PLUS) &&
7731                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7732                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7733                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7734
7735                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7736                         tw32(GRC_MISC_CFG, (1 << 29));
7737                         val |= (1 << 29);
7738                 }
7739         }
7740
7741         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7742                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7743                 tw32(GRC_VCPU_EXT_CTRL,
7744                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7745         }
7746
7747         /* Manage GPHY power for all CPMU-absent PCIe devices. */
7748         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7749                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7750
7751         tw32(GRC_MISC_CFG, val);
7752
7753         /* restore 5701 hardware bug workaround write method */
7754         tp->write32 = write_op;
7755
7756         /* Unfortunately, we have to delay before the PCI read back.
7757          * Some 575X chips will not even respond to a PCI cfg access
7758          * when the reset command is given to the chip.
7759          *
7760          * How do these hardware designers expect things to work
7761          * properly if the PCI write is posted for a long period
7762          * of time?  It is always necessary to have some method by
7763          * which a register read back can occur to push the write
7764          * out which does the reset.
7765          *
7766          * For most tg3 variants the trick below was working.
7767          * Ho hum...
7768          */
7769         udelay(120);
7770
7771         /* Flush PCI posted writes.  The normal MMIO registers
7772          * are inaccessible at this time so this is the only
7773          * way to make this reliably (actually, this is no longer
7774          * the case, see above).  I tried to use indirect
7775          * register read/write but this upset some 5701 variants.
7776          */
7777         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7778
7779         udelay(120);
7780
7781         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7782                 u16 val16;
7783
7784                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7785                         int i;
7786                         u32 cfg_val;
7787
7788                         /* Wait for link training to complete.  */
7789                         for (i = 0; i < 5000; i++)
7790                                 udelay(100);
7791
7792                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7793                         pci_write_config_dword(tp->pdev, 0xc4,
7794                                                cfg_val | (1 << 15));
7795                 }
7796
7797                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7798                 pci_read_config_word(tp->pdev,
7799                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7800                                      &val16);
7801                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7802                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7803                 /*
7804                  * Older PCIe devices only support the 128-byte
7805                  * MPS setting.  Enforce the restriction.
7806                  */
7807                 if (!tg3_flag(tp, CPMU_PRESENT))
7808                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7809                 pci_write_config_word(tp->pdev,
7810                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7811                                       val16);
7812
7813                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7814
7815                 /* Clear error status */
7816                 pci_write_config_word(tp->pdev,
7817                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7818                                       PCI_EXP_DEVSTA_CED |
7819                                       PCI_EXP_DEVSTA_NFED |
7820                                       PCI_EXP_DEVSTA_FED |
7821                                       PCI_EXP_DEVSTA_URD);
7822         }
7823
7824         tg3_restore_pci_state(tp);
7825
7826         tg3_flag_clear(tp, CHIP_RESETTING);
7827         tg3_flag_clear(tp, ERROR_PROCESSED);
7828
7829         val = 0;
7830         if (tg3_flag(tp, 5780_CLASS))
7831                 val = tr32(MEMARB_MODE);
7832         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7833
7834         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7835                 tg3_stop_fw(tp);
7836                 tw32(0x5000, 0x400);
7837         }
7838
7839         tw32(GRC_MODE, tp->grc_mode);
7840
7841         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7842                 val = tr32(0xc4);
7843
7844                 tw32(0xc4, val | (1 << 15));
7845         }
7846
7847         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7848             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7849                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7850                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7851                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7852                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7853         }
7854
7855         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7856                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7857                 val = tp->mac_mode;
7858         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7859                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7860                 val = tp->mac_mode;
7861         } else
7862                 val = 0;
7863
7864         tw32_f(MAC_MODE, val);
7865         udelay(40);
7866
7867         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7868
7869         err = tg3_poll_fw(tp);
7870         if (err)
7871                 return err;
7872
7873         tg3_mdio_start(tp);
7874
7875         if (tg3_flag(tp, PCI_EXPRESS) &&
7876             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7877             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7878             !tg3_flag(tp, 57765_PLUS)) {
7879                 val = tr32(0x7c00);
7880
7881                 tw32(0x7c00, val | (1 << 25));
7882         }
7883
7884         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7885                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7886                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7887         }
7888
7889         /* Reprobe ASF enable state.  */
7890         tg3_flag_clear(tp, ENABLE_ASF);
7891         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7892         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7893         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7894                 u32 nic_cfg;
7895
7896                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7897                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7898                         tg3_flag_set(tp, ENABLE_ASF);
7899                         tp->last_event_jiffies = jiffies;
7900                         if (tg3_flag(tp, 5750_PLUS))
7901                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7902                 }
7903         }
7904
7905         return 0;
7906 }
7907
7908 /* tp->lock is held. */
7909 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7910 {
7911         int err;
7912
7913         tg3_stop_fw(tp);
7914
7915         tg3_write_sig_pre_reset(tp, kind);
7916
7917         tg3_abort_hw(tp, silent);
7918         err = tg3_chip_reset(tp);
7919
7920         __tg3_set_mac_addr(tp, 0);
7921
7922         tg3_write_sig_legacy(tp, kind);
7923         tg3_write_sig_post_reset(tp, kind);
7924
7925         if (err)
7926                 return err;
7927
7928         return 0;
7929 }
7930
7931 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7932 {
7933         struct tg3 *tp = netdev_priv(dev);
7934         struct sockaddr *addr = p;
7935         int err = 0, skip_mac_1 = 0;
7936
7937         if (!is_valid_ether_addr(addr->sa_data))
7938                 return -EINVAL;
7939
7940         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7941
7942         if (!netif_running(dev))
7943                 return 0;
7944
7945         if (tg3_flag(tp, ENABLE_ASF)) {
7946                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7947
7948                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7949                 addr0_low = tr32(MAC_ADDR_0_LOW);
7950                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7951                 addr1_low = tr32(MAC_ADDR_1_LOW);
7952
7953                 /* Skip MAC addr 1 if ASF is using it. */
7954                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7955                     !(addr1_high == 0 && addr1_low == 0))
7956                         skip_mac_1 = 1;
7957         }
7958         spin_lock_bh(&tp->lock);
7959         __tg3_set_mac_addr(tp, skip_mac_1);
7960         spin_unlock_bh(&tp->lock);
7961
7962         return err;
7963 }
7964
7965 /* tp->lock is held. */
7966 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7967                            dma_addr_t mapping, u32 maxlen_flags,
7968                            u32 nic_addr)
7969 {
7970         tg3_write_mem(tp,
7971                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7972                       ((u64) mapping >> 32));
7973         tg3_write_mem(tp,
7974                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7975                       ((u64) mapping & 0xffffffff));
7976         tg3_write_mem(tp,
7977                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7978                        maxlen_flags);
7979
7980         if (!tg3_flag(tp, 5705_PLUS))
7981                 tg3_write_mem(tp,
7982                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7983                               nic_addr);
7984 }
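
/* A minimal sketch of the 64-bit address split performed by
 * tg3_set_bdinfo() (the helper names are illustrative, not driver
 * symbols): a BDINFO host address occupies two 32-bit NIC SRAM
 * words, so the DMA address is written as separate halves.
 */
static inline u32 tg3_dma_addr_hi(dma_addr_t mapping)
{
        return (u32)((u64)mapping >> 32);
}

static inline u32 tg3_dma_addr_lo(dma_addr_t mapping)
{
        return (u32)((u64)mapping & 0xffffffff);
}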
7985
7986 static void __tg3_set_rx_mode(struct net_device *);
7987 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7988 {
7989         int i;
7990
7991         if (!tg3_flag(tp, ENABLE_TSS)) {
7992                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7993                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7994                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7995         } else {
7996                 tw32(HOSTCC_TXCOL_TICKS, 0);
7997                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7998                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7999         }
8000
8001         if (!tg3_flag(tp, ENABLE_RSS)) {
8002                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8003                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8004                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8005         } else {
8006                 tw32(HOSTCC_RXCOL_TICKS, 0);
8007                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8008                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8009         }
8010
8011         if (!tg3_flag(tp, 5705_PLUS)) {
8012                 u32 val = ec->stats_block_coalesce_usecs;
8013
8014                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8015                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8016
8017                 if (!netif_carrier_ok(tp->dev))
8018                         val = 0;
8019
8020                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8021         }
8022
8023         for (i = 0; i < tp->irq_cnt - 1; i++) {
8024                 u32 reg;
8025
8026                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8027                 tw32(reg, ec->rx_coalesce_usecs);
8028                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8029                 tw32(reg, ec->rx_max_coalesced_frames);
8030                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8031                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8032
8033                 if (tg3_flag(tp, ENABLE_TSS)) {
8034                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8035                         tw32(reg, ec->tx_coalesce_usecs);
8036                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8037                         tw32(reg, ec->tx_max_coalesced_frames);
8038                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8039                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8040                 }
8041         }
8042
8043         for (; i < tp->irq_max - 1; i++) {
8044                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8045                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8046                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8047
8048                 if (tg3_flag(tp, ENABLE_TSS)) {
8049                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8050                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8051                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8052                 }
8053         }
8054 }
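
/* A minimal sketch of the register layout assumed by the loops above
 * (tg3_hostcc_vec_reg is an illustrative name, not a driver symbol):
 * the per-vector host coalescing registers sit at a fixed 0x18-byte
 * stride starting at the *_VEC1 addresses, so vector n (n >= 1) maps
 * to base + (n - 1) * 0x18.
 */
static inline u32 tg3_hostcc_vec_reg(u32 vec1_base, unsigned int vec)
{
        return vec1_base + (vec - 1) * 0x18;
}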
8055
8056 /* tp->lock is held. */
8057 static void tg3_rings_reset(struct tg3 *tp)
8058 {
8059         int i;
8060         u32 stblk, txrcb, rxrcb, limit;
8061         struct tg3_napi *tnapi = &tp->napi[0];
8062
8063         /* Disable all transmit rings but the first. */
8064         if (!tg3_flag(tp, 5705_PLUS))
8065                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8066         else if (tg3_flag(tp, 5717_PLUS))
8067                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8068         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8069                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8070         else
8071                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8072
8073         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8074              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8075                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8076                               BDINFO_FLAGS_DISABLED);
8077
8078
8079         /* Disable all receive return rings but the first. */
8080         if (tg3_flag(tp, 5717_PLUS))
8081                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8082         else if (!tg3_flag(tp, 5705_PLUS))
8083                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8084         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8085                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8086                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8087         else
8088                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8089
8090         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8091              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8092                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8093                               BDINFO_FLAGS_DISABLED);
8094
8095         /* Disable interrupts */
8096         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8097         tp->napi[0].chk_msi_cnt = 0;
8098         tp->napi[0].last_rx_cons = 0;
8099         tp->napi[0].last_tx_cons = 0;
8100
8101         /* Zero mailbox registers. */
8102         if (tg3_flag(tp, SUPPORT_MSIX)) {
8103                 for (i = 1; i < tp->irq_max; i++) {
8104                         tp->napi[i].tx_prod = 0;
8105                         tp->napi[i].tx_cons = 0;
8106                         if (tg3_flag(tp, ENABLE_TSS))
8107                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8108                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8109                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8110                         tp->napi[i].chk_msi_cnt = 0;
8111                         tp->napi[i].last_rx_cons = 0;
8112                         tp->napi[i].last_tx_cons = 0;
8113                 }
8114                 if (!tg3_flag(tp, ENABLE_TSS))
8115                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8116         } else {
8117                 tp->napi[0].tx_prod = 0;
8118                 tp->napi[0].tx_cons = 0;
8119                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8120                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8121         }
8122
8123         /* Make sure the NIC-based send BD rings are disabled. */
8124         if (!tg3_flag(tp, 5705_PLUS)) {
8125                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8126                 for (i = 0; i < 16; i++)
8127                         tw32_tx_mbox(mbox + i * 8, 0);
8128         }
8129
8130         txrcb = NIC_SRAM_SEND_RCB;
8131         rxrcb = NIC_SRAM_RCV_RET_RCB;
8132
8133         /* Clear status block in ram. */
8134         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8135
8136         /* Set status block DMA address */
8137         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8138              ((u64) tnapi->status_mapping >> 32));
8139         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8140              ((u64) tnapi->status_mapping & 0xffffffff));
8141
8142         if (tnapi->tx_ring) {
8143                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8144                                (TG3_TX_RING_SIZE <<
8145                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8146                                NIC_SRAM_TX_BUFFER_DESC);
8147                 txrcb += TG3_BDINFO_SIZE;
8148         }
8149
8150         if (tnapi->rx_rcb) {
8151                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8152                                (tp->rx_ret_ring_mask + 1) <<
8153                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8154                 rxrcb += TG3_BDINFO_SIZE;
8155         }
8156
8157         stblk = HOSTCC_STATBLCK_RING1;
8158
8159         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8160                 u64 mapping = (u64)tnapi->status_mapping;
8161                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8162                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8163
8164                 /* Clear status block in ram. */
8165                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8166
8167                 if (tnapi->tx_ring) {
8168                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8169                                        (TG3_TX_RING_SIZE <<
8170                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8171                                        NIC_SRAM_TX_BUFFER_DESC);
8172                         txrcb += TG3_BDINFO_SIZE;
8173                 }
8174
8175                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8176                                ((tp->rx_ret_ring_mask + 1) <<
8177                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8178
8179                 stblk += 8;
8180                 rxrcb += TG3_BDINFO_SIZE;
8181         }
8182 }
8183
8184 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8185 {
8186         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8187
8188         if (!tg3_flag(tp, 5750_PLUS) ||
8189             tg3_flag(tp, 5780_CLASS) ||
8190             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8191             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8192                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8193         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8194                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8195                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8196         else
8197                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8198
8199         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8200         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8201
8202         val = min(nic_rep_thresh, host_rep_thresh);
8203         tw32(RCVBDI_STD_THRESH, val);
8204
8205         if (tg3_flag(tp, 57765_PLUS))
8206                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8207
8208         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8209                 return;
8210
8211         if (!tg3_flag(tp, 5705_PLUS))
8212                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8213         else
8214                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8215
8216         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8217
8218         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8219         tw32(RCVBDI_JUMBO_THRESH, val);
8220
8221         if (tg3_flag(tp, 57765_PLUS))
8222                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8223 }
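
/* A worked example of the threshold math above, with assumed values:
 * rx_pending = 200 gives host_rep_thresh = max(200 / 8, 1) = 25; the
 * NIC-side threshold is half the BD cache depth clamped by
 * rx_std_max_post, and the smaller of the two values is what lands
 * in RCVBDI_STD_THRESH.
 */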
8224
8225 /* tp->lock is held. */
8226 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8227 {
8228         u32 val, rdmac_mode;
8229         int i, err, limit;
8230         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8231
8232         tg3_disable_ints(tp);
8233
8234         tg3_stop_fw(tp);
8235
8236         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8237
8238         if (tg3_flag(tp, INIT_COMPLETE))
8239                 tg3_abort_hw(tp, 1);
8240
8241         /* Enable MAC control of LPI */
8242         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8243                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8244                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8245                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8246
8247                 tw32_f(TG3_CPMU_EEE_CTRL,
8248                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8249
8250                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8251                       TG3_CPMU_EEEMD_LPI_IN_TX |
8252                       TG3_CPMU_EEEMD_LPI_IN_RX |
8253                       TG3_CPMU_EEEMD_EEE_ENABLE;
8254
8255                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8256                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8257
8258                 if (tg3_flag(tp, ENABLE_APE))
8259                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8260
8261                 tw32_f(TG3_CPMU_EEE_MODE, val);
8262
8263                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8264                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8265                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8266
8267                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8268                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8269                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8270         }
8271
8272         if (reset_phy)
8273                 tg3_phy_reset(tp);
8274
8275         err = tg3_chip_reset(tp);
8276         if (err)
8277                 return err;
8278
8279         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8280
8281         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8282                 val = tr32(TG3_CPMU_CTRL);
8283                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8284                 tw32(TG3_CPMU_CTRL, val);
8285
8286                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8287                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8288                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8289                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8290
8291                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8292                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8293                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8294                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8295
8296                 val = tr32(TG3_CPMU_HST_ACC);
8297                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8298                 val |= CPMU_HST_ACC_MACCLK_6_25;
8299                 tw32(TG3_CPMU_HST_ACC, val);
8300         }
8301
8302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8303                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8304                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8305                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8306                 tw32(PCIE_PWR_MGMT_THRESH, val);
8307
8308                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8309                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8310
8311                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8312
8313                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8314                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8315         }
8316
8317         if (tg3_flag(tp, L1PLLPD_EN)) {
8318                 u32 grc_mode = tr32(GRC_MODE);
8319
8320                 /* Access the lower 1K of PL PCIE block registers. */
8321                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8322                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8323
8324                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8325                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8326                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8327
8328                 tw32(GRC_MODE, grc_mode);
8329         }
8330
8331         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8332                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8333                         u32 grc_mode = tr32(GRC_MODE);
8334
8335                         /* Access the lower 1K of PL PCIE block registers. */
8336                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8337                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8338
8339                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8340                                    TG3_PCIE_PL_LO_PHYCTL5);
8341                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8342                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8343
8344                         tw32(GRC_MODE, grc_mode);
8345                 }
8346
8347                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8348                         u32 grc_mode = tr32(GRC_MODE);
8349
8350                         /* Access the lower 1K of DL PCIE block registers. */
8351                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8352                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8353
8354                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8355                                    TG3_PCIE_DL_LO_FTSMAX);
8356                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8357                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8358                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8359
8360                         tw32(GRC_MODE, grc_mode);
8361                 }
8362
8363                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8364                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8365                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8366                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8367         }
8368
8369         /* This works around an issue with Athlon chipsets on
8370          * B3 tigon3 silicon.  This bit has no effect on any
8371          * other revision.  But do not set this on PCI Express
8372          * chips and don't even touch the clocks if the CPMU is present.
8373          */
8374         if (!tg3_flag(tp, CPMU_PRESENT)) {
8375                 if (!tg3_flag(tp, PCI_EXPRESS))
8376                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8377                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8378         }
8379
8380         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8381             tg3_flag(tp, PCIX_MODE)) {
8382                 val = tr32(TG3PCI_PCISTATE);
8383                 val |= PCISTATE_RETRY_SAME_DMA;
8384                 tw32(TG3PCI_PCISTATE, val);
8385         }
8386
8387         if (tg3_flag(tp, ENABLE_APE)) {
8388                 /* Allow reads and writes to the
8389                  * APE register and memory space.
8390                  */
8391                 val = tr32(TG3PCI_PCISTATE);
8392                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8393                        PCISTATE_ALLOW_APE_SHMEM_WR |
8394                        PCISTATE_ALLOW_APE_PSPACE_WR;
8395                 tw32(TG3PCI_PCISTATE, val);
8396         }
8397
8398         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8399                 /* Enable some hw fixes.  */
8400                 val = tr32(TG3PCI_MSI_DATA);
8401                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8402                 tw32(TG3PCI_MSI_DATA, val);
8403         }
8404
8405         /* Descriptor ring init may make accesses to the
8406          * NIC SRAM area to set up the TX descriptors, so we
8407          * can only do this after the hardware has been
8408          * successfully reset.
8409          */
8410         err = tg3_init_rings(tp);
8411         if (err)
8412                 return err;
8413
8414         if (tg3_flag(tp, 57765_PLUS)) {
8415                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8416                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8417                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8418                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8419                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8420                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8421                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8422                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8423         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8424                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8425                 /* This value is determined during the probe time DMA
8426                  * engine test, tg3_test_dma.
8427                  */
8428                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8429         }
8430
8431         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8432                           GRC_MODE_4X_NIC_SEND_RINGS |
8433                           GRC_MODE_NO_TX_PHDR_CSUM |
8434                           GRC_MODE_NO_RX_PHDR_CSUM);
8435         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8436
8437         /* Pseudo-header checksum is done by hardware logic and not
8438          * the offload processors, so make the chip do the pseudo-
8439          * header checksums on receive.  For transmit it is more
8440          * convenient to do the pseudo-header checksum in software
8441          * as Linux does that on transmit for us in all cases.
8442          */
8443         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8444
8445         tw32(GRC_MODE,
8446              tp->grc_mode |
8447              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8448
8449         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8450         val = tr32(GRC_MISC_CFG);
8451         val &= ~0xff;
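        /* A prescale value of 65 presumably divides the 66 MHz clock by
         * (65 + 1), yielding a 1 us timer tick.
         */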
8452         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8453         tw32(GRC_MISC_CFG, val);
8454
8455         /* Initialize MBUF/DESC pool. */
8456         if (tg3_flag(tp, 5750_PLUS)) {
8457                 /* Do nothing.  */
8458         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8459                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8460                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8461                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8462                 else
8463                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8464                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8465                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8466         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8467                 int fw_len;
8468
8469                 fw_len = tp->fw_len;
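                /* Round the firmware length up to the next 128-byte boundary. */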
8470                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8471                 tw32(BUFMGR_MB_POOL_ADDR,
8472                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8473                 tw32(BUFMGR_MB_POOL_SIZE,
8474                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8475         }
8476
8477         if (tp->dev->mtu <= ETH_DATA_LEN) {
8478                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8479                      tp->bufmgr_config.mbuf_read_dma_low_water);
8480                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8481                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8482                 tw32(BUFMGR_MB_HIGH_WATER,
8483                      tp->bufmgr_config.mbuf_high_water);
8484         } else {
8485                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8486                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8487                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8488                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8489                 tw32(BUFMGR_MB_HIGH_WATER,
8490                      tp->bufmgr_config.mbuf_high_water_jumbo);
8491         }
8492         tw32(BUFMGR_DMA_LOW_WATER,
8493              tp->bufmgr_config.dma_low_water);
8494         tw32(BUFMGR_DMA_HIGH_WATER,
8495              tp->bufmgr_config.dma_high_water);
8496
8497         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8498         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8499                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8500         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8501             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8502             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8503                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8504         tw32(BUFMGR_MODE, val);
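        /* Poll up to 20 ms (2000 * 10 us) for the buffer manager to come up. */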
8505         for (i = 0; i < 2000; i++) {
8506                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8507                         break;
8508                 udelay(10);
8509         }
8510         if (i >= 2000) {
8511                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8512                 return -ENODEV;
8513         }
8514
8515         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8516                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8517
8518         tg3_setup_rxbd_thresholds(tp);
8519
8520         /* Initialize TG3_BDINFO's at:
8521          *  RCVDBDI_STD_BD:     standard eth size rx ring
8522          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8523          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8524          *
8525          * like so:
8526          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8527          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8528          *                              ring attribute flags
8529          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8530          *
8531          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8532          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8533          *
8534          * The size of each ring is fixed in the firmware, but the location is
8535          * configurable.
8536          */
8537         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8538              ((u64) tpr->rx_std_mapping >> 32));
8539         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8540              ((u64) tpr->rx_std_mapping & 0xffffffff));
8541         if (!tg3_flag(tp, 5717_PLUS))
8542                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8543                      NIC_SRAM_RX_BUFFER_DESC);
8544
8545         /* Disable the mini ring */
8546         if (!tg3_flag(tp, 5705_PLUS))
8547                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8548                      BDINFO_FLAGS_DISABLED);
8549
8550         /* Program the jumbo buffer descriptor ring control
8551          * blocks on those devices that have them.
8552          */
8553         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8554             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8555
8556                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8557                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8558                              ((u64) tpr->rx_jmb_mapping >> 32));
8559                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8560                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8561                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8562                               BDINFO_FLAGS_MAXLEN_SHIFT;
8563                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8564                              val | BDINFO_FLAGS_USE_EXT_RECV);
8565                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8566                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8567                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8568                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8569                 } else {
8570                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8571                              BDINFO_FLAGS_DISABLED);
8572                 }
8573
8574                 if (tg3_flag(tp, 57765_PLUS)) {
8575                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8576                                 val = TG3_RX_STD_MAX_SIZE_5700;
8577                         else
8578                                 val = TG3_RX_STD_MAX_SIZE_5717;
8579                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8580                         val |= (TG3_RX_STD_DMA_SZ << 2);
8581                 } else
8582                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8583         } else
8584                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8585
8586         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8587
8588         tpr->rx_std_prod_idx = tp->rx_pending;
8589         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8590
8591         tpr->rx_jmb_prod_idx =
8592                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8593         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8594
8595         tg3_rings_reset(tp);
8596
8597         /* Initialize MAC address and backoff seed. */
8598         __tg3_set_mac_addr(tp, 0);
8599
8600         /* MTU + ethernet header + FCS + optional VLAN tag */
8601         tw32(MAC_RX_MTU_SIZE,
8602              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8603
8604         /* The slot time is changed by tg3_setup_phy if we
8605          * run at gigabit with half duplex.
8606          */
8607         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8608               (6 << TX_LENGTHS_IPG_SHIFT) |
8609               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8610
8611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8612                 val |= tr32(MAC_TX_LENGTHS) &
8613                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8614                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8615
8616         tw32(MAC_TX_LENGTHS, val);
8617
8618         /* Receive rules. */
8619         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8620         tw32(RCVLPC_CONFIG, 0x0181);
8621
8622         /* Calculate RDMAC_MODE setting early; we need it to determine
8623          * the RCVLPC_STATE_ENABLE mask.
8624          */
8625         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8626                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8627                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8628                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8629                       RDMAC_MODE_LNGREAD_ENAB);
8630
8631         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8632                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8633
8634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8635             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8637                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8638                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8639                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8640
8641         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8642             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8643                 if (tg3_flag(tp, TSO_CAPABLE) &&
8644                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8645                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8646                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8647                            !tg3_flag(tp, IS_5788)) {
8648                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8649                 }
8650         }
8651
8652         if (tg3_flag(tp, PCI_EXPRESS))
8653                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8654
8655         if (tg3_flag(tp, HW_TSO_1) ||
8656             tg3_flag(tp, HW_TSO_2) ||
8657             tg3_flag(tp, HW_TSO_3))
8658                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8659
8660         if (tg3_flag(tp, 57765_PLUS) ||
8661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8662             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8663                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8664
8665         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8666                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8667
8668         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8669             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8670             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8671             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8672             tg3_flag(tp, 57765_PLUS)) {
8673                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8674                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8675                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8676                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8677                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8678                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8679                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8680                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8681                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8682                 }
8683                 tw32(TG3_RDMA_RSRVCTRL_REG,
8684                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8685         }
8686
8687         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8688             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8689                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8690                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8691                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8692                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8693         }
8694
8695         /* Receive/send statistics. */
8696         if (tg3_flag(tp, 5750_PLUS)) {
8697                 val = tr32(RCVLPC_STATS_ENABLE);
8698                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8699                 tw32(RCVLPC_STATS_ENABLE, val);
8700         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8701                    tg3_flag(tp, TSO_CAPABLE)) {
8702                 val = tr32(RCVLPC_STATS_ENABLE);
8703                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8704                 tw32(RCVLPC_STATS_ENABLE, val);
8705         } else {
8706                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8707         }
8708         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8709         tw32(SNDDATAI_STATSENAB, 0xffffff);
8710         tw32(SNDDATAI_STATSCTRL,
8711              (SNDDATAI_SCTRL_ENABLE |
8712               SNDDATAI_SCTRL_FASTUPD));
8713
8714         /* Setup host coalescing engine. */
8715         tw32(HOSTCC_MODE, 0);
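        /* Writing 0 stops the engine; poll up to 20 ms for the enable bit
         * to clear.
         */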
8716         for (i = 0; i < 2000; i++) {
8717                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8718                         break;
8719                 udelay(10);
8720         }
8721
8722         __tg3_set_coalesce(tp, &tp->coal);
8723
8724         if (!tg3_flag(tp, 5705_PLUS)) {
8725                 /* Status/statistics block address.  See tg3_timer,
8726                  * the tg3_periodic_fetch_stats call there, and
8727                  * tg3_get_stats to see how this works for 5705/5750 chips.
8728                  */
8729                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8730                      ((u64) tp->stats_mapping >> 32));
8731                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8732                      ((u64) tp->stats_mapping & 0xffffffff));
8733                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8734
8735                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8736
8737                 /* Clear statistics and status block memory areas */
8738                 for (i = NIC_SRAM_STATS_BLK;
8739                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8740                      i += sizeof(u32)) {
8741                         tg3_write_mem(tp, i, 0);
8742                         udelay(40);
8743                 }
8744         }
8745
8746         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8747
8748         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8749         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8750         if (!tg3_flag(tp, 5705_PLUS))
8751                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8752
8753         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8754                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8755                 /* reset to prevent losing 1st rx packet intermittently */
8756                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8757                 udelay(10);
8758         }
8759
8760         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8761                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8762                         MAC_MODE_FHDE_ENABLE;
8763         if (tg3_flag(tp, ENABLE_APE))
8764                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8765         if (!tg3_flag(tp, 5705_PLUS) &&
8766             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8767             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8768                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8769         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8770         udelay(40);
8771
8772         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8773          * If TG3_FLAG_IS_NIC is zero, we should read the
8774          * register to preserve the GPIO settings for LOMs. The GPIOs,
8775          * whether used as inputs or outputs, are set by boot code after
8776          * reset.
8777          */
8778         if (!tg3_flag(tp, IS_NIC)) {
8779                 u32 gpio_mask;
8780
8781                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8782                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8783                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8784
8785                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8786                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8787                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8788
8789                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8790                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8791
8792                 tp->grc_local_ctrl &= ~gpio_mask;
8793                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8794
8795                 /* GPIO1 must be driven high for eeprom write protect */
8796                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8797                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8798                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8799         }
8800         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8801         udelay(100);
8802
8803         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8804                 val = tr32(MSGINT_MODE);
8805                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8806                 if (!tg3_flag(tp, 1SHOT_MSI))
8807                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8808                 tw32(MSGINT_MODE, val);
8809         }
8810
8811         if (!tg3_flag(tp, 5705_PLUS)) {
8812                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8813                 udelay(40);
8814         }
8815
8816         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8817                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8818                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8819                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8820                WDMAC_MODE_LNGREAD_ENAB);
8821
8822         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8823             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8824                 if (tg3_flag(tp, TSO_CAPABLE) &&
8825                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8826                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8827                         /* nothing */
8828                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8829                            !tg3_flag(tp, IS_5788)) {
8830                         val |= WDMAC_MODE_RX_ACCEL;
8831                 }
8832         }
8833
8834         /* Enable host coalescing bug fix */
8835         if (tg3_flag(tp, 5755_PLUS))
8836                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8837
8838         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8839                 val |= WDMAC_MODE_BURST_ALL_DATA;
8840
8841         tw32_f(WDMAC_MODE, val);
8842         udelay(40);
8843
8844         if (tg3_flag(tp, PCIX_MODE)) {
8845                 u16 pcix_cmd;
8846
8847                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8848                                      &pcix_cmd);
8849                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8850                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8851                         pcix_cmd |= PCI_X_CMD_READ_2K;
8852                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8853                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8854                         pcix_cmd |= PCI_X_CMD_READ_2K;
8855                 }
8856                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8857                                       pcix_cmd);
8858         }
8859
8860         tw32_f(RDMAC_MODE, rdmac_mode);
8861         udelay(40);
8862
8863         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8864         if (!tg3_flag(tp, 5705_PLUS))
8865                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8866
8867         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8868                 tw32(SNDDATAC_MODE,
8869                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8870         else
8871                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8872
8873         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8874         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8875         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8876         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8877                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8878         tw32(RCVDBDI_MODE, val);
8879         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8880         if (tg3_flag(tp, HW_TSO_1) ||
8881             tg3_flag(tp, HW_TSO_2) ||
8882             tg3_flag(tp, HW_TSO_3))
8883                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8884         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8885         if (tg3_flag(tp, ENABLE_TSS))
8886                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8887         tw32(SNDBDI_MODE, val);
8888         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8889
8890         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8891                 err = tg3_load_5701_a0_firmware_fix(tp);
8892                 if (err)
8893                         return err;
8894         }
8895
8896         if (tg3_flag(tp, TSO_CAPABLE)) {
8897                 err = tg3_load_tso_firmware(tp);
8898                 if (err)
8899                         return err;
8900         }
8901
8902         tp->tx_mode = TX_MODE_ENABLE;
8903
8904         if (tg3_flag(tp, 5755_PLUS) ||
8905             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8906                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8907
8908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8909                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8910                 tp->tx_mode &= ~val;
8911                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8912         }
8913
8914         tw32_f(MAC_TX_MODE, tp->tx_mode);
8915         udelay(100);
8916
8917         if (tg3_flag(tp, ENABLE_RSS)) {
8918                 int i = 0;
8919                 u32 reg = MAC_RSS_INDIR_TBL_0;
8920
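                /* Each 32-bit indirection table register packs eight 4-bit
                 * RSS ring indices, so the table is written eight entries
                 * at a time.
                 */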
8921                 if (tp->irq_cnt == 2) {
8922                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8923                                 tw32(reg, 0x0);
8924                                 reg += 4;
8925                         }
8926                 } else {
8927                         u32 val;
8928
8929                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8930                                 val = i % (tp->irq_cnt - 1);
8931                                 i++;
8932                                 for (; i % 8; i++) {
8933                                         val <<= 4;
8934                                         val |= (i % (tp->irq_cnt - 1));
8935                                 }
8936                                 tw32(reg, val);
8937                                 reg += 4;
8938                         }
8939                 }
8940
8941                 /* Setup the "secret" hash key. */
8942                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8943                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8944                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8945                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8946                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8947                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8948                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8949                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8950                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8951                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8952         }
8953
8954         tp->rx_mode = RX_MODE_ENABLE;
8955         if (tg3_flag(tp, 5755_PLUS))
8956                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8957
8958         if (tg3_flag(tp, ENABLE_RSS))
8959                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8960                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8961                                RX_MODE_RSS_IPV6_HASH_EN |
8962                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8963                                RX_MODE_RSS_IPV4_HASH_EN |
8964                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8965
8966         tw32_f(MAC_RX_MODE, tp->rx_mode);
8967         udelay(10);
8968
8969         tw32(MAC_LED_CTRL, tp->led_ctrl);
8970
8971         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8972         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8973                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8974                 udelay(10);
8975         }
8976         tw32_f(MAC_RX_MODE, tp->rx_mode);
8977         udelay(10);
8978
8979         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8980                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8981                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8982                         /* Set drive transmission level to 1.2V  */
8983                         /* only if the signal pre-emphasis bit is not set  */
8984                         val = tr32(MAC_SERDES_CFG);
8985                         val &= 0xfffff000;
8986                         val |= 0x880;
8987                         tw32(MAC_SERDES_CFG, val);
8988                 }
8989                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8990                         tw32(MAC_SERDES_CFG, 0x616000);
8991         }
8992
8993         /* Prevent chip from dropping frames when flow control
8994          * is enabled.
8995          */
8996         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8997                 val = 1;
8998         else
8999                 val = 2;
9000         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9001
9002         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9003             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9004                 /* Use hardware link auto-negotiation */
9005                 tg3_flag_set(tp, HW_AUTONEG);
9006         }
9007
9008         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9009             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9010                 u32 tmp;
9011
9012                 tmp = tr32(SERDES_RX_CTRL);
9013                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9014                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9015                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9016                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9017         }
9018
9019         if (!tg3_flag(tp, USE_PHYLIB)) {
9020                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9021                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9022                         tp->link_config.speed = tp->link_config.orig_speed;
9023                         tp->link_config.duplex = tp->link_config.orig_duplex;
9024                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9025                 }
9026
9027                 err = tg3_setup_phy(tp, 0);
9028                 if (err)
9029                         return err;
9030
9031                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9032                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9033                         u32 tmp;
9034
9035                         /* Clear CRC stats. */
9036                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9037                                 tg3_writephy(tp, MII_TG3_TEST1,
9038                                              tmp | MII_TG3_TEST1_CRC_EN);
9039                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9040                         }
9041                 }
9042         }
9043
9044         __tg3_set_rx_mode(tp->dev);
9045
9046         /* Initialize receive rules. */
9047         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9048         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9049         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9050         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9051
9052         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9053                 limit = 8;
9054         else
9055                 limit = 16;
9056         if (tg3_flag(tp, ENABLE_ASF))
9057                 limit -= 4;
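        /* The cases below intentionally fall through, clearing every rule
         * from (limit - 1) down to 4; rules 3 and 2 are deliberately left
         * alone (see the commented-out writes).
         */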
9058         switch (limit) {
9059         case 16:
9060                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9061         case 15:
9062                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9063         case 14:
9064                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9065         case 13:
9066                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9067         case 12:
9068                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9069         case 11:
9070                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9071         case 10:
9072                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9073         case 9:
9074                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9075         case 8:
9076                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9077         case 7:
9078                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9079         case 6:
9080                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9081         case 5:
9082                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9083         case 4:
9084                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9085         case 3:
9086                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9087         case 2:
9088         case 1:
9089
9090         default:
9091                 break;
9092         }
9093
9094         if (tg3_flag(tp, ENABLE_APE))
9095                 /* Write our heartbeat update interval (disabled) to the APE. */
9096                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9097                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9098
9099         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9100
9101         return 0;
9102 }
9103
9104 /* Called at device open time to get the chip ready for
9105  * packet processing.  Invoked with tp->lock held.
9106  */
9107 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9108 {
9109         tg3_switch_clocks(tp);
9110
9111         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9112
9113         return tg3_reset_hw(tp, reset_phy);
9114 }
9115
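/* Add a 32-bit hardware counter into a 64-bit software counter,
 * propagating a carry into the high word when the low word wraps.
 */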
9116 #define TG3_STAT_ADD32(PSTAT, REG) \
9117 do {    u32 __val = tr32(REG); \
9118         (PSTAT)->low += __val; \
9119         if ((PSTAT)->low < __val) \
9120                 (PSTAT)->high += 1; \
9121 } while (0)
9122
9123 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9124 {
9125         struct tg3_hw_stats *sp = tp->hw_stats;
9126
9127         if (!netif_carrier_ok(tp->dev))
9128                 return;
9129
9130         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9131         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9132         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9133         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9134         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9135         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9136         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9137         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9138         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9139         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9140         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9141         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9142         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9143
9144         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9145         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9146         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9147         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9148         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9149         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9150         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9151         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9152         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9153         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9154         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9155         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9156         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9157         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9158
9159         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9160         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9161             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9162             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9163                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9164         } else {
9165                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9166                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9167                 if (val) {
9168                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9169                         sp->rx_discards.low += val;
9170                         if (sp->rx_discards.low < val)
9171                                 sp->rx_discards.high += 1;
9172                 }
9173                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9174         }
9175         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9176 }
9177
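/* Work around occasionally missed MSIs: if a vector has work pending
 * but its consumer indices have not advanced since the last timer tick,
 * invoke the MSI handler directly.
 */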
9178 static void tg3_chk_missed_msi(struct tg3 *tp)
9179 {
9180         u32 i;
9181
9182         for (i = 0; i < tp->irq_cnt; i++) {
9183                 struct tg3_napi *tnapi = &tp->napi[i];
9184
9185                 if (tg3_has_work(tnapi)) {
9186                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9187                             tnapi->last_tx_cons == tnapi->tx_cons) {
9188                                 if (tnapi->chk_msi_cnt < 1) {
9189                                         tnapi->chk_msi_cnt++;
9190                                         return;
9191                                 }
9192                                 tg3_msi(0, tnapi);
9193                         }
9194                 }
9195                 tnapi->chk_msi_cnt = 0;
9196                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9197                 tnapi->last_tx_cons = tnapi->tx_cons;
9198         }
9199 }
9200
9201 static void tg3_timer(unsigned long __opaque)
9202 {
9203         struct tg3 *tp = (struct tg3 *) __opaque;
9204
9205         if (tp->irq_sync)
9206                 goto restart_timer;
9207
9208         spin_lock(&tp->lock);
9209
9210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9211             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9212                 tg3_chk_missed_msi(tp);
9213
9214         if (!tg3_flag(tp, TAGGED_STATUS)) {
9215                 /* All of this garbage is because, when using non-tagged
9216                  * IRQ status, the mailbox/status_block protocol the chip
9217                  * uses with the cpu is race prone.
9218                  */
9219                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9220                         tw32(GRC_LOCAL_CTRL,
9221                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9222                 } else {
9223                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9224                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9225                 }
9226
9227                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9228                         tg3_flag_set(tp, RESTART_TIMER);
9229                         spin_unlock(&tp->lock);
9230                         schedule_work(&tp->reset_task);
9231                         return;
9232                 }
9233         }
9234
9235         /* This part only runs once per second. */
9236         if (!--tp->timer_counter) {
9237                 if (tg3_flag(tp, 5705_PLUS))
9238                         tg3_periodic_fetch_stats(tp);
9239
9240                 if (tp->setlpicnt && !--tp->setlpicnt)
9241                         tg3_phy_eee_enable(tp);
9242
9243                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9244                         u32 mac_stat;
9245                         int phy_event;
9246
9247                         mac_stat = tr32(MAC_STATUS);
9248
9249                         phy_event = 0;
9250                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9251                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9252                                         phy_event = 1;
9253                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9254                                 phy_event = 1;
9255
9256                         if (phy_event)
9257                                 tg3_setup_phy(tp, 0);
9258                 } else if (tg3_flag(tp, POLL_SERDES)) {
9259                         u32 mac_stat = tr32(MAC_STATUS);
9260                         int need_setup = 0;
9261
9262                         if (netif_carrier_ok(tp->dev) &&
9263                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9264                                 need_setup = 1;
9265                         }
9266                         if (!netif_carrier_ok(tp->dev) &&
9267                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9268                                          MAC_STATUS_SIGNAL_DET))) {
9269                                 need_setup = 1;
9270                         }
9271                         if (need_setup) {
9272                                 if (!tp->serdes_counter) {
9273                                         tw32_f(MAC_MODE,
9274                                              (tp->mac_mode &
9275                                               ~MAC_MODE_PORT_MODE_MASK));
9276                                         udelay(40);
9277                                         tw32_f(MAC_MODE, tp->mac_mode);
9278                                         udelay(40);
9279                                 }
9280                                 tg3_setup_phy(tp, 0);
9281                         }
9282                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9283                            tg3_flag(tp, 5780_CLASS)) {
9284                         tg3_serdes_parallel_detect(tp);
9285                 }
9286
9287                 tp->timer_counter = tp->timer_multiplier;
9288         }
9289
9290         /* Heartbeat is only sent once every 2 seconds.
9291          *
9292          * The heartbeat is to tell the ASF firmware that the host
9293          * driver is still alive.  In the event that the OS crashes,
9294          * ASF needs to reset the hardware to free up the FIFO space
9295          * that may be filled with rx packets destined for the host.
9296          * If the FIFO is full, ASF will no longer function properly.
9297          *
9298          * Unintended resets have been reported on real time kernels
9299          * where the timer doesn't run on time.  Netpoll will also have
9300          * the same problem.
9301          *
9302          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9303          * to check the ring condition when the heartbeat is expiring
9304          * before doing the reset.  This will prevent most unintended
9305          * resets.
9306          */
9307         if (!--tp->asf_counter) {
9308                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9309                         tg3_wait_for_event_ack(tp);
9310
9311                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9312                                       FWCMD_NICDRV_ALIVE3);
9313                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9314                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9315                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9316
9317                         tg3_generate_fw_event(tp);
9318                 }
9319                 tp->asf_counter = tp->asf_multiplier;
9320         }
9321
9322         spin_unlock(&tp->lock);
9323
9324 restart_timer:
9325         tp->timer.expires = jiffies + tp->timer_offset;
9326         add_timer(&tp->timer);
9327 }
9328
9329 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9330 {
9331         irq_handler_t fn;
9332         unsigned long flags;
9333         char *name;
9334         struct tg3_napi *tnapi = &tp->napi[irq_num];
9335
9336         if (tp->irq_cnt == 1)
9337                 name = tp->dev->name;
9338         else {
9339                 name = &tnapi->irq_lbl[0];
9340                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9341                 name[IFNAMSIZ-1] = 0;
9342         }
9343
9344         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9345                 fn = tg3_msi;
9346                 if (tg3_flag(tp, 1SHOT_MSI))
9347                         fn = tg3_msi_1shot;
9348                 flags = 0;
9349         } else {
9350                 fn = tg3_interrupt;
9351                 if (tg3_flag(tp, TAGGED_STATUS))
9352                         fn = tg3_interrupt_tagged;
9353                 flags = IRQF_SHARED;
9354         }
9355
9356         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9357 }
9358
9359 static int tg3_test_interrupt(struct tg3 *tp)
9360 {
9361         struct tg3_napi *tnapi = &tp->napi[0];
9362         struct net_device *dev = tp->dev;
9363         int err, i, intr_ok = 0;
9364         u32 val;
9365
9366         if (!netif_running(dev))
9367                 return -ENODEV;
9368
9369         tg3_disable_ints(tp);
9370
9371         free_irq(tnapi->irq_vec, tnapi);
9372
9373         /*
9374          * Turn off MSI one shot mode.  Otherwise this test has no
9375          * observable way to know whether the interrupt was delivered.
9376          */
9377         if (tg3_flag(tp, 57765_PLUS)) {
9378                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9379                 tw32(MSGINT_MODE, val);
9380         }
9381
9382         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9383                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9384         if (err)
9385                 return err;
9386
9387         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9388         tg3_enable_ints(tp);
9389
9390         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9391                tnapi->coal_now);
9392
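        /* Give the test interrupt up to 50 ms (5 * 10 ms) to arrive. */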
9393         for (i = 0; i < 5; i++) {
9394                 u32 int_mbox, misc_host_ctrl;
9395
9396                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9397                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9398
9399                 if ((int_mbox != 0) ||
9400                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9401                         intr_ok = 1;
9402                         break;
9403                 }
9404
9405                 if (tg3_flag(tp, 57765_PLUS) &&
9406                     tnapi->hw_status->status_tag != tnapi->last_tag)
9407                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9408
9409                 msleep(10);
9410         }
9411
9412         tg3_disable_ints(tp);
9413
9414         free_irq(tnapi->irq_vec, tnapi);
9415
9416         err = tg3_request_irq(tp, 0);
9417
9418         if (err)
9419                 return err;
9420
9421         if (intr_ok) {
9422                 /* Reenable MSI one shot mode. */
9423                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9424                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9425                         tw32(MSGINT_MODE, val);
9426                 }
9427                 return 0;
9428         }
9429
9430         return -EIO;
9431 }
9432
9433 /* Returns 0 if the MSI test succeeds, or if it fails and INTx mode is
9434  * successfully restored.
9435  */
9436 static int tg3_test_msi(struct tg3 *tp)
9437 {
9438         int err;
9439         u16 pci_cmd;
9440
9441         if (!tg3_flag(tp, USING_MSI))
9442                 return 0;
9443
9444         /* Turn off SERR reporting in case MSI terminates with Master
9445          * Abort.
9446          */
9447         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9448         pci_write_config_word(tp->pdev, PCI_COMMAND,
9449                               pci_cmd & ~PCI_COMMAND_SERR);
9450
9451         err = tg3_test_interrupt(tp);
9452
9453         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9454
9455         if (!err)
9456                 return 0;
9457
9458         /* other failures */
9459         if (err != -EIO)
9460                 return err;
9461
9462         /* MSI test failed, go back to INTx mode */
9463         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9464                     "to INTx mode. Please report this failure to the PCI "
9465                     "maintainer and include system chipset information\n");
9466
9467         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9468
9469         pci_disable_msi(tp->pdev);
9470
9471         tg3_flag_clear(tp, USING_MSI);
9472         tp->napi[0].irq_vec = tp->pdev->irq;
9473
9474         err = tg3_request_irq(tp, 0);
9475         if (err)
9476                 return err;
9477
9478         /* Need to reset the chip because the MSI cycle may have terminated
9479          * with Master Abort.
9480          */
9481         tg3_full_lock(tp, 1);
9482
9483         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9484         err = tg3_init_hw(tp, 1);
9485
9486         tg3_full_unlock(tp);
9487
9488         if (err)
9489                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9490
9491         return err;
9492 }
9493
9494 static int tg3_request_firmware(struct tg3 *tp)
9495 {
9496         const __be32 *fw_data;
9497
9498         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9499                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9500                            tp->fw_needed);
9501                 return -ENOENT;
9502         }
9503
9504         fw_data = (void *)tp->fw->data;
9505
9506         /* Firmware blob starts with version numbers, followed by
9507          * start address and _full_ length including BSS sections
9508          * (which must be longer than the actual data, of course).
9509          */
9510
9511         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9512         if (tp->fw_len < (tp->fw->size - 12)) {
9513                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9514                            tp->fw_len, tp->fw_needed);
9515                 release_firmware(tp->fw);
9516                 tp->fw = NULL;
9517                 return -EINVAL;
9518         }
9519
9520         /* We no longer need firmware; we have it. */
9521         tp->fw_needed = NULL;
9522         return 0;
9523 }
9524
9525 static bool tg3_enable_msix(struct tg3 *tp)
9526 {
9527         int i, rc, cpus = num_online_cpus();
9528         struct msix_entry msix_ent[tp->irq_max];
9529
9530         if (cpus == 1)
9531                 /* Just fall back to the simpler MSI mode. */
9532                 return false;
9533
9534         /*
9535          * We want as many rx rings enabled as there are cpus.
9536          * The first MSIX vector only deals with link interrupts, etc.,
9537          * so we add one to the number of vectors we are requesting.
9538          */
9539         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9540
9541         for (i = 0; i < tp->irq_max; i++) {
9542                 msix_ent[i].entry  = i;
9543                 msix_ent[i].vector = 0;
9544         }
9545
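        /* pci_enable_msix() returns 0 on success, a positive count of
         * available vectors if the request cannot be fully satisfied, or
         * a negative errno.
         */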
9546         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9547         if (rc < 0) {
9548                 return false;
9549         } else if (rc != 0) {
9550                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9551                         return false;
9552                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9553                               tp->irq_cnt, rc);
9554                 tp->irq_cnt = rc;
9555         }
9556
9557         for (i = 0; i < tp->irq_max; i++)
9558                 tp->napi[i].irq_vec = msix_ent[i].vector;
9559
9560         netif_set_real_num_tx_queues(tp->dev, 1);
9561         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9562         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9563                 pci_disable_msix(tp->pdev);
9564                 return false;
9565         }
9566
9567         if (tp->irq_cnt > 1) {
9568                 tg3_flag_set(tp, ENABLE_RSS);
9569
9570                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9571                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9572                         tg3_flag_set(tp, ENABLE_TSS);
9573                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9574                 }
9575         }
9576
9577         return true;
9578 }
9579
9580 static void tg3_ints_init(struct tg3 *tp)
9581 {
9582         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9583             !tg3_flag(tp, TAGGED_STATUS)) {
9584                 /* All MSI supporting chips should support tagged
9585                  * status.  Assert that this is the case.
9586                  */
9587                 netdev_warn(tp->dev,
9588                             "MSI without TAGGED_STATUS? Not using MSI\n");
9589                 goto defcfg;
9590         }
9591
9592         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9593                 tg3_flag_set(tp, USING_MSIX);
9594         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9595                 tg3_flag_set(tp, USING_MSI);
9596
9597         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9598                 u32 msi_mode = tr32(MSGINT_MODE);
9599                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9600                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9601                 if (!tg3_flag(tp, 1SHOT_MSI))
9602                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9603                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9604         }
9605 defcfg:
9606         if (!tg3_flag(tp, USING_MSIX)) {
9607                 tp->irq_cnt = 1;
9608                 tp->napi[0].irq_vec = tp->pdev->irq;
9609                 netif_set_real_num_tx_queues(tp->dev, 1);
9610                 netif_set_real_num_rx_queues(tp->dev, 1);
9611         }
9612 }
9613
9614 static void tg3_ints_fini(struct tg3 *tp)
9615 {
9616         if (tg3_flag(tp, USING_MSIX))
9617                 pci_disable_msix(tp->pdev);
9618         else if (tg3_flag(tp, USING_MSI))
9619                 pci_disable_msi(tp->pdev);
9620         tg3_flag_clear(tp, USING_MSI);
9621         tg3_flag_clear(tp, USING_MSIX);
9622         tg3_flag_clear(tp, ENABLE_RSS);
9623         tg3_flag_clear(tp, ENABLE_TSS);
9624 }
9625
9626 static int tg3_open(struct net_device *dev)
9627 {
9628         struct tg3 *tp = netdev_priv(dev);
9629         int i, err;
9630
9631         if (tp->fw_needed) {
9632                 err = tg3_request_firmware(tp);
9633                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9634                         if (err)
9635                                 return err;
9636                 } else if (err) {
9637                         netdev_warn(tp->dev, "TSO capability disabled\n");
9638                         tg3_flag_clear(tp, TSO_CAPABLE);
9639                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9640                         netdev_notice(tp->dev, "TSO capability restored\n");
9641                         tg3_flag_set(tp, TSO_CAPABLE);
9642                 }
9643         }
9644
9645         netif_carrier_off(tp->dev);
9646
9647         err = tg3_power_up(tp);
9648         if (err)
9649                 return err;
9650
9651         tg3_full_lock(tp, 0);
9652
9653         tg3_disable_ints(tp);
9654         tg3_flag_clear(tp, INIT_COMPLETE);
9655
9656         tg3_full_unlock(tp);
9657
9658         /*
9659          * Set up interrupts first so we know how
9660          * many NAPI resources to allocate
9661          */
9662         tg3_ints_init(tp);
9663
9664         /* The placement of this call is tied
9665          * to the setup and use of Host TX descriptors.
9666          */
9667         err = tg3_alloc_consistent(tp);
9668         if (err)
9669                 goto err_out1;
9670
9671         tg3_napi_init(tp);
9672
9673         tg3_napi_enable(tp);
9674
9675         for (i = 0; i < tp->irq_cnt; i++) {
9676                 err = tg3_request_irq(tp, i);
9677                 if (err) {
9678                         /* Free only the vectors that were requested. */
9679                         for (i--; i >= 0; i--)
9680                                 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9681                         break;
9682                 }
9683         }
9684
9685         if (err)
9686                 goto err_out2;
9687
9688         tg3_full_lock(tp, 0);
9689
9690         err = tg3_init_hw(tp, 1);
9691         if (err) {
9692                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9693                 tg3_free_rings(tp);
9694         } else {
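                /* With tagged status blocks a 1 Hz poll timer suffices;
                 * otherwise (and on 5717/57765) poll at 10 Hz.
                 */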
9695                 if (tg3_flag(tp, TAGGED_STATUS) &&
9696                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9697                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9698                         tp->timer_offset = HZ;
9699                 else
9700                         tp->timer_offset = HZ / 10;
9701
9702                 BUG_ON(tp->timer_offset > HZ);
9703                 tp->timer_counter = tp->timer_multiplier =
9704                         (HZ / tp->timer_offset);
9705                 tp->asf_counter = tp->asf_multiplier =
9706                         ((HZ / tp->timer_offset) * 2);
9707
9708                 init_timer(&tp->timer);
9709                 tp->timer.expires = jiffies + tp->timer_offset;
9710                 tp->timer.data = (unsigned long) tp;
9711                 tp->timer.function = tg3_timer;
9712         }
9713
9714         tg3_full_unlock(tp);
9715
9716         if (err)
9717                 goto err_out3;
9718
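        /* MSI delivery can be unreliable on some platforms; fire a test
         * interrupt and let tg3_test_msi() fall back to legacy
         * interrupts internally if it never arrives.
         */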
9719         if (tg3_flag(tp, USING_MSI)) {
9720                 err = tg3_test_msi(tp);
9721
9722                 if (err) {
9723                         tg3_full_lock(tp, 0);
9724                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9725                         tg3_free_rings(tp);
9726                         tg3_full_unlock(tp);
9727
9728                         goto err_out2;
9729                 }
9730
9731                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9732                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9733
9734                         tw32(PCIE_TRANSACTION_CFG,
9735                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9736                 }
9737         }
9738
9739         tg3_phy_start(tp);
9740
9741         tg3_full_lock(tp, 0);
9742
9743         add_timer(&tp->timer);
9744         tg3_flag_set(tp, INIT_COMPLETE);
9745         tg3_enable_ints(tp);
9746
9747         tg3_full_unlock(tp);
9748
9749         netif_tx_start_all_queues(dev);
9750
9751         /*
9752          * Reset the loopback feature if it was turned on while the
9753          * device was down, to make sure it is installed properly now.
9754          */
9755         if (dev->features & NETIF_F_LOOPBACK)
9756                 tg3_set_loopback(dev, dev->features);
9757
9758         return 0;
9759
9760 err_out3:
9761         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9762                 struct tg3_napi *tnapi = &tp->napi[i];
9763                 free_irq(tnapi->irq_vec, tnapi);
9764         }
9765
9766 err_out2:
9767         tg3_napi_disable(tp);
9768         tg3_napi_fini(tp);
9769         tg3_free_consistent(tp);
9770
9771 err_out1:
9772         tg3_ints_fini(tp);
9773         tg3_frob_aux_power(tp, false);
9774         pci_set_power_state(tp->pdev, PCI_D3hot);
9775         return err;
9776 }
9777
9778 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9779                                                  struct rtnl_link_stats64 *);
9780 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9781
9782 static int tg3_close(struct net_device *dev)
9783 {
9784         int i;
9785         struct tg3 *tp = netdev_priv(dev);
9786
9787         tg3_napi_disable(tp);
9788         cancel_work_sync(&tp->reset_task);
9789
9790         netif_tx_stop_all_queues(dev);
9791
9792         del_timer_sync(&tp->timer);
9793
9794         tg3_phy_stop(tp);
9795
9796         tg3_full_lock(tp, 1);
9797
9798         tg3_disable_ints(tp);
9799
9800         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9801         tg3_free_rings(tp);
9802         tg3_flag_clear(tp, INIT_COMPLETE);
9803
9804         tg3_full_unlock(tp);
9805
9806         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9807                 struct tg3_napi *tnapi = &tp->napi[i];
9808                 free_irq(tnapi->irq_vec, tnapi);
9809         }
9810
9811         tg3_ints_fini(tp);
9812
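        /* Snapshot the hardware counters so they can be folded into
         * future readings after the chip is reset (see tg3_get_stats64
         * and ESTAT_ADD).
         */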
9813         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9814
9815         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9816                sizeof(tp->estats_prev));
9817
9818         tg3_napi_fini(tp);
9819
9820         tg3_free_consistent(tp);
9821
9822         tg3_power_down(tp);
9823
9824         netif_carrier_off(tp->dev);
9825
9826         return 0;
9827 }
9828
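/* Hardware statistics are kept as two 32-bit halves; fold them into a
 * single u64 for the stack.
 */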
9829 static inline u64 get_stat64(tg3_stat64_t *val)
9830 {
9831         return ((u64)val->high << 32) | ((u64)val->low);
9832 }
9833
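/* On 5700/5701 with a copper PHY the CRC error count comes from a PHY
 * counter (enabled via MII_TG3_TEST1) that is accumulated in software;
 * all other chips report it in the hardware statistics block.
 */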
9834 static u64 calc_crc_errors(struct tg3 *tp)
9835 {
9836         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9837
9838         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9839             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9840              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9841                 u32 val;
9842
9843                 spin_lock_bh(&tp->lock);
9844                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9845                         tg3_writephy(tp, MII_TG3_TEST1,
9846                                      val | MII_TG3_TEST1_CRC_EN);
9847                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9848                 } else
9849                         val = 0;
9850                 spin_unlock_bh(&tp->lock);
9851
9852                 tp->phy_crc_errors += val;
9853
9854                 return tp->phy_crc_errors;
9855         }
9856
9857         return get_stat64(&hw_stats->rx_fcs_errors);
9858 }
9859
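/* Ethtool statistics survive chip resets by adding the snapshot taken
 * at the last close (estats_prev) to the live hardware counters.
 */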
9860 #define ESTAT_ADD(member) \
9861         estats->member =        old_estats->member + \
9862                                 get_stat64(&hw_stats->member)
9863
9864 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9865 {
9866         struct tg3_ethtool_stats *estats = &tp->estats;
9867         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9868         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9869
9870         if (!hw_stats)
9871                 return old_estats;
9872
9873         ESTAT_ADD(rx_octets);
9874         ESTAT_ADD(rx_fragments);
9875         ESTAT_ADD(rx_ucast_packets);
9876         ESTAT_ADD(rx_mcast_packets);
9877         ESTAT_ADD(rx_bcast_packets);
9878         ESTAT_ADD(rx_fcs_errors);
9879         ESTAT_ADD(rx_align_errors);
9880         ESTAT_ADD(rx_xon_pause_rcvd);
9881         ESTAT_ADD(rx_xoff_pause_rcvd);
9882         ESTAT_ADD(rx_mac_ctrl_rcvd);
9883         ESTAT_ADD(rx_xoff_entered);
9884         ESTAT_ADD(rx_frame_too_long_errors);
9885         ESTAT_ADD(rx_jabbers);
9886         ESTAT_ADD(rx_undersize_packets);
9887         ESTAT_ADD(rx_in_length_errors);
9888         ESTAT_ADD(rx_out_length_errors);
9889         ESTAT_ADD(rx_64_or_less_octet_packets);
9890         ESTAT_ADD(rx_65_to_127_octet_packets);
9891         ESTAT_ADD(rx_128_to_255_octet_packets);
9892         ESTAT_ADD(rx_256_to_511_octet_packets);
9893         ESTAT_ADD(rx_512_to_1023_octet_packets);
9894         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9895         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9896         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9897         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9898         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9899
9900         ESTAT_ADD(tx_octets);
9901         ESTAT_ADD(tx_collisions);
9902         ESTAT_ADD(tx_xon_sent);
9903         ESTAT_ADD(tx_xoff_sent);
9904         ESTAT_ADD(tx_flow_control);
9905         ESTAT_ADD(tx_mac_errors);
9906         ESTAT_ADD(tx_single_collisions);
9907         ESTAT_ADD(tx_mult_collisions);
9908         ESTAT_ADD(tx_deferred);
9909         ESTAT_ADD(tx_excessive_collisions);
9910         ESTAT_ADD(tx_late_collisions);
9911         ESTAT_ADD(tx_collide_2times);
9912         ESTAT_ADD(tx_collide_3times);
9913         ESTAT_ADD(tx_collide_4times);
9914         ESTAT_ADD(tx_collide_5times);
9915         ESTAT_ADD(tx_collide_6times);
9916         ESTAT_ADD(tx_collide_7times);
9917         ESTAT_ADD(tx_collide_8times);
9918         ESTAT_ADD(tx_collide_9times);
9919         ESTAT_ADD(tx_collide_10times);
9920         ESTAT_ADD(tx_collide_11times);
9921         ESTAT_ADD(tx_collide_12times);
9922         ESTAT_ADD(tx_collide_13times);
9923         ESTAT_ADD(tx_collide_14times);
9924         ESTAT_ADD(tx_collide_15times);
9925         ESTAT_ADD(tx_ucast_packets);
9926         ESTAT_ADD(tx_mcast_packets);
9927         ESTAT_ADD(tx_bcast_packets);
9928         ESTAT_ADD(tx_carrier_sense_errors);
9929         ESTAT_ADD(tx_discards);
9930         ESTAT_ADD(tx_errors);
9931
9932         ESTAT_ADD(dma_writeq_full);
9933         ESTAT_ADD(dma_write_prioq_full);
9934         ESTAT_ADD(rxbds_empty);
9935         ESTAT_ADD(rx_discards);
9936         ESTAT_ADD(rx_errors);
9937         ESTAT_ADD(rx_threshold_hit);
9938
9939         ESTAT_ADD(dma_readq_full);
9940         ESTAT_ADD(dma_read_prioq_full);
9941         ESTAT_ADD(tx_comp_queue_full);
9942
9943         ESTAT_ADD(ring_set_send_prod_index);
9944         ESTAT_ADD(ring_status_update);
9945         ESTAT_ADD(nic_irqs);
9946         ESTAT_ADD(nic_avoided_irqs);
9947         ESTAT_ADD(nic_tx_threshold_hit);
9948
9949         ESTAT_ADD(mbuf_lwm_thresh_hit);
9950
9951         return estats;
9952 }
9953
9954 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9955                                                  struct rtnl_link_stats64 *stats)
9956 {
9957         struct tg3 *tp = netdev_priv(dev);
9958         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9959         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9960
9961         if (!hw_stats)
9962                 return old_stats;
9963
9964         stats->rx_packets = old_stats->rx_packets +
9965                 get_stat64(&hw_stats->rx_ucast_packets) +
9966                 get_stat64(&hw_stats->rx_mcast_packets) +
9967                 get_stat64(&hw_stats->rx_bcast_packets);
9968
9969         stats->tx_packets = old_stats->tx_packets +
9970                 get_stat64(&hw_stats->tx_ucast_packets) +
9971                 get_stat64(&hw_stats->tx_mcast_packets) +
9972                 get_stat64(&hw_stats->tx_bcast_packets);
9973
9974         stats->rx_bytes = old_stats->rx_bytes +
9975                 get_stat64(&hw_stats->rx_octets);
9976         stats->tx_bytes = old_stats->tx_bytes +
9977                 get_stat64(&hw_stats->tx_octets);
9978
9979         stats->rx_errors = old_stats->rx_errors +
9980                 get_stat64(&hw_stats->rx_errors);
9981         stats->tx_errors = old_stats->tx_errors +
9982                 get_stat64(&hw_stats->tx_errors) +
9983                 get_stat64(&hw_stats->tx_mac_errors) +
9984                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9985                 get_stat64(&hw_stats->tx_discards);
9986
9987         stats->multicast = old_stats->multicast +
9988                 get_stat64(&hw_stats->rx_mcast_packets);
9989         stats->collisions = old_stats->collisions +
9990                 get_stat64(&hw_stats->tx_collisions);
9991
9992         stats->rx_length_errors = old_stats->rx_length_errors +
9993                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9994                 get_stat64(&hw_stats->rx_undersize_packets);
9995
9996         stats->rx_over_errors = old_stats->rx_over_errors +
9997                 get_stat64(&hw_stats->rxbds_empty);
9998         stats->rx_frame_errors = old_stats->rx_frame_errors +
9999                 get_stat64(&hw_stats->rx_align_errors);
10000         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10001                 get_stat64(&hw_stats->tx_discards);
10002         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10003                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10004
10005         stats->rx_crc_errors = old_stats->rx_crc_errors +
10006                 calc_crc_errors(tp);
10007
10008         stats->rx_missed_errors = old_stats->rx_missed_errors +
10009                 get_stat64(&hw_stats->rx_discards);
10010
10011         stats->rx_dropped = tp->rx_dropped;
10012
10013         return stats;
10014 }
10015
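/* Bit-reflected CRC-32 (Ethernet polynomial 0xedb88320), computed one
 * byte at a time; used to index the 128-bit multicast hash filter.
 */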
10016 static inline u32 calc_crc(unsigned char *buf, int len)
10017 {
10018         u32 reg;
10019         u32 tmp;
10020         int j, k;
10021
10022         reg = 0xffffffff;
10023
10024         for (j = 0; j < len; j++) {
10025                 reg ^= buf[j];
10026
10027                 for (k = 0; k < 8; k++) {
10028                         tmp = reg & 0x01;
10029
10030                         reg >>= 1;
10031
10032                         if (tmp)
10033                                 reg ^= 0xedb88320;
10034                 }
10035         }
10036
10037         return ~reg;
10038 }
10039
10040 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10041 {
10042         /* accept or reject all multicast frames */
10043         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10044         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10045         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10046         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10047 }
10048
10049 static void __tg3_set_rx_mode(struct net_device *dev)
10050 {
10051         struct tg3 *tp = netdev_priv(dev);
10052         u32 rx_mode;
10053
10054         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10055                                   RX_MODE_KEEP_VLAN_TAG);
10056
10057 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10058         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10059          * flag clear.
10060          */
10061         if (!tg3_flag(tp, ENABLE_ASF))
10062                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10063 #endif
10064
10065         if (dev->flags & IFF_PROMISC) {
10066                 /* Promiscuous mode. */
10067                 rx_mode |= RX_MODE_PROMISC;
10068         } else if (dev->flags & IFF_ALLMULTI) {
10069                 /* Accept all multicast. */
10070                 tg3_set_multi(tp, 1);
10071         } else if (netdev_mc_empty(dev)) {
10072                 /* Reject all multicast. */
10073                 tg3_set_multi(tp, 0);
10074         } else {
10075                 /* Accept one or more multicast addresses. */
10076                 struct netdev_hw_addr *ha;
10077                 u32 mc_filter[4] = { 0, };
10078                 u32 regidx;
10079                 u32 bit;
10080                 u32 crc;
10081
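                /* The hash is the low 7 bits of the inverted CRC:
                 * bits 6:5 select one of the four 32-bit hash registers,
                 * bits 4:0 select the bit within it.
                 */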
10082                 netdev_for_each_mc_addr(ha, dev) {
10083                         crc = calc_crc(ha->addr, ETH_ALEN);
10084                         bit = ~crc & 0x7f;
10085                         regidx = (bit & 0x60) >> 5;
10086                         bit &= 0x1f;
10087                         mc_filter[regidx] |= (1 << bit);
10088                 }
10089
10090                 tw32(MAC_HASH_REG_0, mc_filter[0]);
10091                 tw32(MAC_HASH_REG_1, mc_filter[1]);
10092                 tw32(MAC_HASH_REG_2, mc_filter[2]);
10093                 tw32(MAC_HASH_REG_3, mc_filter[3]);
10094         }
10095
10096         if (rx_mode != tp->rx_mode) {
10097                 tp->rx_mode = rx_mode;
10098                 tw32_f(MAC_RX_MODE, rx_mode);
10099                 udelay(10);
10100         }
10101 }
10102
10103 static void tg3_set_rx_mode(struct net_device *dev)
10104 {
10105         struct tg3 *tp = netdev_priv(dev);
10106
10107         if (!netif_running(dev))
10108                 return;
10109
10110         tg3_full_lock(tp, 0);
10111         __tg3_set_rx_mode(dev);
10112         tg3_full_unlock(tp);
10113 }
10114
10115 static int tg3_get_regs_len(struct net_device *dev)
10116 {
10117         return TG3_REG_BLK_SIZE;
10118 }
10119
10120 static void tg3_get_regs(struct net_device *dev,
10121                 struct ethtool_regs *regs, void *_p)
10122 {
10123         struct tg3 *tp = netdev_priv(dev);
10124
10125         regs->version = 0;
10126
10127         memset(_p, 0, TG3_REG_BLK_SIZE);
10128
10129         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10130                 return;
10131
10132         tg3_full_lock(tp, 0);
10133
10134         tg3_dump_legacy_regs(tp, (u32 *)_p);
10135
10136         tg3_full_unlock(tp);
10137 }
10138
10139 static int tg3_get_eeprom_len(struct net_device *dev)
10140 {
10141         struct tg3 *tp = netdev_priv(dev);
10142
10143         return tp->nvram_size;
10144 }
10145
10146 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10147 {
10148         struct tg3 *tp = netdev_priv(dev);
10149         int ret;
10150         u8  *pd;
10151         u32 i, offset, len, b_offset, b_count;
10152         __be32 val;
10153
10154         if (tg3_flag(tp, NO_NVRAM))
10155                 return -EINVAL;
10156
10157         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10158                 return -EAGAIN;
10159
10160         offset = eeprom->offset;
10161         len = eeprom->len;
10162         eeprom->len = 0;
10163
10164         eeprom->magic = TG3_EEPROM_MAGIC;
10165
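        /* NVRAM is read as 32-bit words, so split the request into an
         * unaligned head, a run of whole words, and an unaligned tail.
         */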
10166         if (offset & 3) {
10167                 /* adjustments to start on required 4 byte boundary */
10168                 b_offset = offset & 3;
10169                 b_count = 4 - b_offset;
10170                 if (b_count > len) {
10171                         /* i.e. offset=1 len=2 */
10172                         b_count = len;
10173                 }
10174                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10175                 if (ret)
10176                         return ret;
10177                 memcpy(data, ((char *)&val) + b_offset, b_count);
10178                 len -= b_count;
10179                 offset += b_count;
10180                 eeprom->len += b_count;
10181         }
10182
10183         /* read bytes up to the last 4 byte boundary */
10184         pd = &data[eeprom->len];
10185         for (i = 0; i < (len - (len & 3)); i += 4) {
10186                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10187                 if (ret) {
10188                         eeprom->len += i;
10189                         return ret;
10190                 }
10191                 memcpy(pd + i, &val, 4);
10192         }
10193         eeprom->len += i;
10194
10195         if (len & 3) {
10196                 /* read last bytes not ending on 4 byte boundary */
10197                 pd = &data[eeprom->len];
10198                 b_count = len & 3;
10199                 b_offset = offset + len - b_count;
10200                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10201                 if (ret)
10202                         return ret;
10203                 memcpy(pd, &val, b_count);
10204                 eeprom->len += b_count;
10205         }
10206         return 0;
10207 }
10208
10209 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10210
10211 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10212 {
10213         struct tg3 *tp = netdev_priv(dev);
10214         int ret;
10215         u32 offset, len, b_offset, odd_len;
10216         u8 *buf;
10217         __be32 start, end;
10218
10219         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10220                 return -EAGAIN;
10221
10222         if (tg3_flag(tp, NO_NVRAM) ||
10223             eeprom->magic != TG3_EEPROM_MAGIC)
10224                 return -EINVAL;
10225
10226         offset = eeprom->offset;
10227         len = eeprom->len;
10228
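        /* NVRAM writes must be whole, aligned 32-bit words: read back
         * the words straddling an unaligned start or end and merge the
         * caller's bytes into a scratch buffer before programming.
         */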
10229         if ((b_offset = (offset & 3))) {
10230                 /* adjustments to start on required 4 byte boundary */
10231                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10232                 if (ret)
10233                         return ret;
10234                 len += b_offset;
10235                 offset &= ~3;
10236                 if (len < 4)
10237                         len = 4;
10238         }
10239
10240         odd_len = 0;
10241         if (len & 3) {
10242                 /* adjustments to end on required 4 byte boundary */
10243                 odd_len = 1;
10244                 len = (len + 3) & ~3;
10245                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10246                 if (ret)
10247                         return ret;
10248         }
10249
10250         buf = data;
10251         if (b_offset || odd_len) {
10252                 buf = kmalloc(len, GFP_KERNEL);
10253                 if (!buf)
10254                         return -ENOMEM;
10255                 if (b_offset)
10256                         memcpy(buf, &start, 4);
10257                 if (odd_len)
10258                         memcpy(buf+len-4, &end, 4);
10259                 memcpy(buf + b_offset, data, eeprom->len);
10260         }
10261
10262         ret = tg3_nvram_write_block(tp, offset, len, buf);
10263
10264         if (buf != data)
10265                 kfree(buf);
10266
10267         return ret;
10268 }
10269
10270 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10271 {
10272         struct tg3 *tp = netdev_priv(dev);
10273
10274         if (tg3_flag(tp, USE_PHYLIB)) {
10275                 struct phy_device *phydev;
10276                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10277                         return -EAGAIN;
10278                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10279                 return phy_ethtool_gset(phydev, cmd);
10280         }
10281
10282         cmd->supported = (SUPPORTED_Autoneg);
10283
10284         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10285                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10286                                    SUPPORTED_1000baseT_Full);
10287
10288         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10289                 cmd->supported |= (SUPPORTED_100baseT_Half |
10290                                   SUPPORTED_100baseT_Full |
10291                                   SUPPORTED_10baseT_Half |
10292                                   SUPPORTED_10baseT_Full |
10293                                   SUPPORTED_TP);
10294                 cmd->port = PORT_TP;
10295         } else {
10296                 cmd->supported |= SUPPORTED_FIBRE;
10297                 cmd->port = PORT_FIBRE;
10298         }
10299
10300         cmd->advertising = tp->link_config.advertising;
10301         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10302                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10303                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10304                                 cmd->advertising |= ADVERTISED_Pause;
10305                         } else {
10306                                 cmd->advertising |= ADVERTISED_Pause |
10307                                                     ADVERTISED_Asym_Pause;
10308                         }
10309                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10310                         cmd->advertising |= ADVERTISED_Asym_Pause;
10311                 }
10312         }
10313         if (netif_running(dev)) {
10314                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10315                 cmd->duplex = tp->link_config.active_duplex;
10316         } else {
10317                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10318                 cmd->duplex = DUPLEX_INVALID;
10319         }
10320         cmd->phy_address = tp->phy_addr;
10321         cmd->transceiver = XCVR_INTERNAL;
10322         cmd->autoneg = tp->link_config.autoneg;
10323         cmd->maxtxpkt = 0;
10324         cmd->maxrxpkt = 0;
10325         return 0;
10326 }
10327
10328 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10329 {
10330         struct tg3 *tp = netdev_priv(dev);
10331         u32 speed = ethtool_cmd_speed(cmd);
10332
10333         if (tg3_flag(tp, USE_PHYLIB)) {
10334                 struct phy_device *phydev;
10335                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10336                         return -EAGAIN;
10337                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10338                 return phy_ethtool_sset(phydev, cmd);
10339         }
10340
10341         if (cmd->autoneg != AUTONEG_ENABLE &&
10342             cmd->autoneg != AUTONEG_DISABLE)
10343                 return -EINVAL;
10344
10345         if (cmd->autoneg == AUTONEG_DISABLE &&
10346             cmd->duplex != DUPLEX_FULL &&
10347             cmd->duplex != DUPLEX_HALF)
10348                 return -EINVAL;
10349
10350         if (cmd->autoneg == AUTONEG_ENABLE) {
10351                 u32 mask = ADVERTISED_Autoneg |
10352                            ADVERTISED_Pause |
10353                            ADVERTISED_Asym_Pause;
10354
10355                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10356                         mask |= ADVERTISED_1000baseT_Half |
10357                                 ADVERTISED_1000baseT_Full;
10358
10359                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10360                         mask |= ADVERTISED_100baseT_Half |
10361                                 ADVERTISED_100baseT_Full |
10362                                 ADVERTISED_10baseT_Half |
10363                                 ADVERTISED_10baseT_Full |
10364                                 ADVERTISED_TP;
10365                 else
10366                         mask |= ADVERTISED_FIBRE;
10367
10368                 if (cmd->advertising & ~mask)
10369                         return -EINVAL;
10370
10371                 mask &= (ADVERTISED_1000baseT_Half |
10372                          ADVERTISED_1000baseT_Full |
10373                          ADVERTISED_100baseT_Half |
10374                          ADVERTISED_100baseT_Full |
10375                          ADVERTISED_10baseT_Half |
10376                          ADVERTISED_10baseT_Full);
10377
10378                 cmd->advertising &= mask;
10379         } else {
10380                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10381                         if (speed != SPEED_1000)
10382                                 return -EINVAL;
10383
10384                         if (cmd->duplex != DUPLEX_FULL)
10385                                 return -EINVAL;
10386                 } else {
10387                         if (speed != SPEED_100 &&
10388                             speed != SPEED_10)
10389                                 return -EINVAL;
10390                 }
10391         }
10392
10393         tg3_full_lock(tp, 0);
10394
10395         tp->link_config.autoneg = cmd->autoneg;
10396         if (cmd->autoneg == AUTONEG_ENABLE) {
10397                 tp->link_config.advertising = (cmd->advertising |
10398                                               ADVERTISED_Autoneg);
10399                 tp->link_config.speed = SPEED_INVALID;
10400                 tp->link_config.duplex = DUPLEX_INVALID;
10401         } else {
10402                 tp->link_config.advertising = 0;
10403                 tp->link_config.speed = speed;
10404                 tp->link_config.duplex = cmd->duplex;
10405         }
10406
10407         tp->link_config.orig_speed = tp->link_config.speed;
10408         tp->link_config.orig_duplex = tp->link_config.duplex;
10409         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10410
10411         if (netif_running(dev))
10412                 tg3_setup_phy(tp, 1);
10413
10414         tg3_full_unlock(tp);
10415
10416         return 0;
10417 }
10418
10419 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10420 {
10421         struct tg3 *tp = netdev_priv(dev);
10422
10423         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10424         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10425         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10426         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10427 }
10428
10429 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10430 {
10431         struct tg3 *tp = netdev_priv(dev);
10432
10433         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10434                 wol->supported = WAKE_MAGIC;
10435         else
10436                 wol->supported = 0;
10437         wol->wolopts = 0;
10438         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10439                 wol->wolopts = WAKE_MAGIC;
10440         memset(&wol->sopass, 0, sizeof(wol->sopass));
10441 }
10442
10443 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10444 {
10445         struct tg3 *tp = netdev_priv(dev);
10446         struct device *dp = &tp->pdev->dev;
10447
10448         if (wol->wolopts & ~WAKE_MAGIC)
10449                 return -EINVAL;
10450         if ((wol->wolopts & WAKE_MAGIC) &&
10451             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10452                 return -EINVAL;
10453
10454         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10455
10456         spin_lock_bh(&tp->lock);
10457         if (device_may_wakeup(dp))
10458                 tg3_flag_set(tp, WOL_ENABLE);
10459         else
10460                 tg3_flag_clear(tp, WOL_ENABLE);
10461         spin_unlock_bh(&tp->lock);
10462
10463         return 0;
10464 }
10465
10466 static u32 tg3_get_msglevel(struct net_device *dev)
10467 {
10468         struct tg3 *tp = netdev_priv(dev);
10469         return tp->msg_enable;
10470 }
10471
10472 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10473 {
10474         struct tg3 *tp = netdev_priv(dev);
10475         tp->msg_enable = value;
10476 }
10477
10478 static int tg3_nway_reset(struct net_device *dev)
10479 {
10480         struct tg3 *tp = netdev_priv(dev);
10481         int r;
10482
10483         if (!netif_running(dev))
10484                 return -EAGAIN;
10485
10486         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10487                 return -EINVAL;
10488
10489         if (tg3_flag(tp, USE_PHYLIB)) {
10490                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10491                         return -EAGAIN;
10492                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10493         } else {
10494                 u32 bmcr;
10495
10496                 spin_lock_bh(&tp->lock);
10497                 r = -EINVAL;
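                /* The back-to-back BMCR read is deliberate; presumably
                 * the first read may return stale data on some PHYs, so
                 * only the second result is trusted.
                 */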
10498                 tg3_readphy(tp, MII_BMCR, &bmcr);
10499                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10500                     ((bmcr & BMCR_ANENABLE) ||
10501                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10502                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10503                                                    BMCR_ANENABLE);
10504                         r = 0;
10505                 }
10506                 spin_unlock_bh(&tp->lock);
10507         }
10508
10509         return r;
10510 }
10511
10512 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10513 {
10514         struct tg3 *tp = netdev_priv(dev);
10515
10516         ering->rx_max_pending = tp->rx_std_ring_mask;
10517         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10518                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10519         else
10520                 ering->rx_jumbo_max_pending = 0;
10521
10522         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10523
10524         ering->rx_pending = tp->rx_pending;
10525         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10526                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10527         else
10528                 ering->rx_jumbo_pending = 0;
10529
10530         ering->tx_pending = tp->napi[0].tx_pending;
10531 }
10532
10533 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10534 {
10535         struct tg3 *tp = netdev_priv(dev);
10536         int i, irq_sync = 0, err = 0;
10537
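        /* The tx ring must hold at least one maximally fragmented skb
         * (MAX_SKB_FRAGS descriptors, tripled on chips needing the
         * TSO_BUG workaround).
         */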
10538         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10539             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10540             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10541             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10542             (tg3_flag(tp, TSO_BUG) &&
10543              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10544                 return -EINVAL;
10545
10546         if (netif_running(dev)) {
10547                 tg3_phy_stop(tp);
10548                 tg3_netif_stop(tp);
10549                 irq_sync = 1;
10550         }
10551
10552         tg3_full_lock(tp, irq_sync);
10553
10554         tp->rx_pending = ering->rx_pending;
10555
10556         if (tg3_flag(tp, MAX_RXPEND_64) &&
10557             tp->rx_pending > 63)
10558                 tp->rx_pending = 63;
10559         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10560
10561         for (i = 0; i < tp->irq_max; i++)
10562                 tp->napi[i].tx_pending = ering->tx_pending;
10563
10564         if (netif_running(dev)) {
10565                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10566                 err = tg3_restart_hw(tp, 1);
10567                 if (!err)
10568                         tg3_netif_start(tp);
10569         }
10570
10571         tg3_full_unlock(tp);
10572
10573         if (irq_sync && !err)
10574                 tg3_phy_start(tp);
10575
10576         return err;
10577 }
10578
10579 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10580 {
10581         struct tg3 *tp = netdev_priv(dev);
10582
10583         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10584
10585         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10586                 epause->rx_pause = 1;
10587         else
10588                 epause->rx_pause = 0;
10589
10590         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10591                 epause->tx_pause = 1;
10592         else
10593                 epause->tx_pause = 0;
10594 }
10595
10596 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10597 {
10598         struct tg3 *tp = netdev_priv(dev);
10599         int err = 0;
10600
10601         if (tg3_flag(tp, USE_PHYLIB)) {
10602                 u32 newadv;
10603                 struct phy_device *phydev;
10604
10605                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10606
10607                 if (!(phydev->supported & SUPPORTED_Pause) ||
10608                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10609                      (epause->rx_pause != epause->tx_pause)))
10610                         return -EINVAL;
10611
10612                 tp->link_config.flowctrl = 0;
10613                 if (epause->rx_pause) {
10614                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10615
10616                         if (epause->tx_pause) {
10617                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10618                                 newadv = ADVERTISED_Pause;
10619                         } else
10620                                 newadv = ADVERTISED_Pause |
10621                                          ADVERTISED_Asym_Pause;
10622                 } else if (epause->tx_pause) {
10623                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10624                         newadv = ADVERTISED_Asym_Pause;
10625                 } else
10626                         newadv = 0;
10627
10628                 if (epause->autoneg)
10629                         tg3_flag_set(tp, PAUSE_AUTONEG);
10630                 else
10631                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10632
10633                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10634                         u32 oldadv = phydev->advertising &
10635                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10636                         if (oldadv != newadv) {
10637                                 phydev->advertising &=
10638                                         ~(ADVERTISED_Pause |
10639                                           ADVERTISED_Asym_Pause);
10640                                 phydev->advertising |= newadv;
10641                                 if (phydev->autoneg) {
10642                                         /*
10643                                          * Always renegotiate the link to
10644                                          * inform our link partner of our
10645                                          * flow control settings, even if the
10646                                          * flow control is forced.  Let
10647                                          * tg3_adjust_link() do the final
10648                                          * flow control setup.
10649                                          */
10650                                         return phy_start_aneg(phydev);
10651                                 }
10652                         }
10653
10654                         if (!epause->autoneg)
10655                                 tg3_setup_flow_control(tp, 0, 0);
10656                 } else {
10657                         tp->link_config.orig_advertising &=
10658                                         ~(ADVERTISED_Pause |
10659                                           ADVERTISED_Asym_Pause);
10660                         tp->link_config.orig_advertising |= newadv;
10661                 }
10662         } else {
10663                 int irq_sync = 0;
10664
10665                 if (netif_running(dev)) {
10666                         tg3_netif_stop(tp);
10667                         irq_sync = 1;
10668                 }
10669
10670                 tg3_full_lock(tp, irq_sync);
10671
10672                 if (epause->autoneg)
10673                         tg3_flag_set(tp, PAUSE_AUTONEG);
10674                 else
10675                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10676                 if (epause->rx_pause)
10677                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10678                 else
10679                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10680                 if (epause->tx_pause)
10681                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10682                 else
10683                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10684
10685                 if (netif_running(dev)) {
10686                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10687                         err = tg3_restart_hw(tp, 1);
10688                         if (!err)
10689                                 tg3_netif_start(tp);
10690                 }
10691
10692                 tg3_full_unlock(tp);
10693         }
10694
10695         return err;
10696 }
10697
10698 static int tg3_get_sset_count(struct net_device *dev, int sset)
10699 {
10700         switch (sset) {
10701         case ETH_SS_TEST:
10702                 return TG3_NUM_TEST;
10703         case ETH_SS_STATS:
10704                 return TG3_NUM_STATS;
10705         default:
10706                 return -EOPNOTSUPP;
10707         }
10708 }
10709
10710 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10711 {
10712         switch (stringset) {
10713         case ETH_SS_STATS:
10714                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10715                 break;
10716         case ETH_SS_TEST:
10717                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10718                 break;
10719         default:
10720                 WARN_ON(1);     /* we need a WARN() */
10721                 break;
10722         }
10723 }
10724
10725 static int tg3_set_phys_id(struct net_device *dev,
10726                             enum ethtool_phys_id_state state)
10727 {
10728         struct tg3 *tp = netdev_priv(dev);
10729
10730         if (!netif_running(tp->dev))
10731                 return -EAGAIN;
10732
10733         switch (state) {
10734         case ETHTOOL_ID_ACTIVE:
10735                 return 1;       /* cycle on/off once per second */
10736
10737         case ETHTOOL_ID_ON:
10738                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10739                      LED_CTRL_1000MBPS_ON |
10740                      LED_CTRL_100MBPS_ON |
10741                      LED_CTRL_10MBPS_ON |
10742                      LED_CTRL_TRAFFIC_OVERRIDE |
10743                      LED_CTRL_TRAFFIC_BLINK |
10744                      LED_CTRL_TRAFFIC_LED);
10745                 break;
10746
10747         case ETHTOOL_ID_OFF:
10748                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10749                      LED_CTRL_TRAFFIC_OVERRIDE);
10750                 break;
10751
10752         case ETHTOOL_ID_INACTIVE:
10753                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10754                 break;
10755         }
10756
10757         return 0;
10758 }
10759
10760 static void tg3_get_ethtool_stats(struct net_device *dev,
10761                                    struct ethtool_stats *estats, u64 *tmp_stats)
10762 {
10763         struct tg3 *tp = netdev_priv(dev);
10764         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10765 }
10766
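/* Return the VPD block: for flash parts (TG3_EEPROM_MAGIC) locate it via
 * the NVRAM directory's extended-VPD entry, falling back to the fixed
 * offset; otherwise read it through PCI config space (pci_read_vpd).
 */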
10767 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10768 {
10769         int i;
10770         __be32 *buf;
10771         u32 offset = 0, len = 0;
10772         u32 magic, val;
10773
10774         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10775                 return NULL;
10776
10777         if (magic == TG3_EEPROM_MAGIC) {
10778                 for (offset = TG3_NVM_DIR_START;
10779                      offset < TG3_NVM_DIR_END;
10780                      offset += TG3_NVM_DIRENT_SIZE) {
10781                         if (tg3_nvram_read(tp, offset, &val))
10782                                 return NULL;
10783
10784                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10785                             TG3_NVM_DIRTYPE_EXTVPD)
10786                                 break;
10787                 }
10788
10789                 if (offset != TG3_NVM_DIR_END) {
10790                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10791                         if (tg3_nvram_read(tp, offset + 4, &offset))
10792                                 return NULL;
10793
10794                         offset = tg3_nvram_logical_addr(tp, offset);
10795                 }
10796         }
10797
10798         if (!offset || !len) {
10799                 offset = TG3_NVM_VPD_OFF;
10800                 len = TG3_NVM_VPD_LEN;
10801         }
10802
10803         buf = kmalloc(len, GFP_KERNEL);
10804         if (buf == NULL)
10805                 return NULL;
10806
10807         if (magic == TG3_EEPROM_MAGIC) {
10808                 for (i = 0; i < len; i += 4) {
10809                         /* The data is in little-endian format in NVRAM.
10810                          * Use the big-endian read routines to preserve
10811                          * the byte order as it exists in NVRAM.
10812                          */
10813                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10814                                 goto error;
10815                 }
10816         } else {
10817                 u8 *ptr;
10818                 ssize_t cnt;
10819                 unsigned int pos = 0;
10820
10821                 ptr = (u8 *)&buf[0];
10822                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10823                         cnt = pci_read_vpd(tp->pdev, pos,
10824                                            len - pos, ptr);
10825                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10826                                 cnt = 0;
10827                         else if (cnt < 0)
10828                                 goto error;
10829                 }
10830                 if (pos != len)
10831                         goto error;
10832         }
10833
10834         *vpdlen = len;
10835
10836         return buf;
10837
10838 error:
10839         kfree(buf);
10840         return NULL;
10841 }
10842
10843 #define NVRAM_TEST_SIZE 0x100
10844 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10845 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10846 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10847 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10848 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10849 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10850 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10851 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10852
10853 static int tg3_test_nvram(struct tg3 *tp)
10854 {
10855         u32 csum, magic, len;
10856         __be32 *buf;
10857         int i, j, k, err = 0, size;
10858
10859         if (tg3_flag(tp, NO_NVRAM))
10860                 return 0;
10861
10862         if (tg3_nvram_read(tp, 0, &magic) != 0)
10863                 return -EIO;
10864
10865         if (magic == TG3_EEPROM_MAGIC)
10866                 size = NVRAM_TEST_SIZE;
10867         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10868                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10869                     TG3_EEPROM_SB_FORMAT_1) {
10870                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10871                         case TG3_EEPROM_SB_REVISION_0:
10872                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10873                                 break;
10874                         case TG3_EEPROM_SB_REVISION_2:
10875                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10876                                 break;
10877                         case TG3_EEPROM_SB_REVISION_3:
10878                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10879                                 break;
10880                         case TG3_EEPROM_SB_REVISION_4:
10881                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10882                                 break;
10883                         case TG3_EEPROM_SB_REVISION_5:
10884                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10885                                 break;
10886                         case TG3_EEPROM_SB_REVISION_6:
10887                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10888                                 break;
10889                         default:
10890                                 return -EIO;
10891                         }
10892                 } else
10893                         return 0;
10894         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10895                 size = NVRAM_SELFBOOT_HW_SIZE;
10896         else
10897                 return -EIO;
10898
10899         buf = kmalloc(size, GFP_KERNEL);
10900         if (buf == NULL)
10901                 return -ENOMEM;
10902
10903         err = -EIO;
10904         for (i = 0, j = 0; i < size; i += 4, j++) {
10905                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10906                 if (err)
10907                         break;
10908         }
10909         if (i < size)
10910                 goto out;
10911
10912         /* Selfboot format */
10913         magic = be32_to_cpu(buf[0]);
10914         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10915             TG3_EEPROM_MAGIC_FW) {
10916                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10917
10918                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10919                     TG3_EEPROM_SB_REVISION_2) {
10920                         /* For rev 2, the csum doesn't include the MBA. */
10921                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10922                                 csum8 += buf8[i];
10923                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10924                                 csum8 += buf8[i];
10925                 } else {
10926                         for (i = 0; i < size; i++)
10927                                 csum8 += buf8[i];
10928                 }
10929
10930                 if (csum8 == 0) {
10931                         err = 0;
10932                         goto out;
10933                 }
10934
10935                 err = -EIO;
10936                 goto out;
10937         }
10938
10939         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10940             TG3_EEPROM_MAGIC_HW) {
10941                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10942                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10943                 u8 *buf8 = (u8 *) buf;
10944
10945                 /* Separate the parity bits and the data bytes.  */
10946                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10947                         if ((i == 0) || (i == 8)) {
10948                                 int l;
10949                                 u8 msk;
10950
10951                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10952                                         parity[k++] = buf8[i] & msk;
10953                                 i++;
10954                         } else if (i == 16) {
10955                                 int l;
10956                                 u8 msk;
10957
10958                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10959                                         parity[k++] = buf8[i] & msk;
10960                                 i++;
10961
10962                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10963                                         parity[k++] = buf8[i] & msk;
10964                                 i++;
10965                         }
10966                         data[j++] = buf8[i];
10967                 }
10968
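                /* Each data byte plus its stored parity bit must contain
                 * an odd number of ones; any byte failing that check
                 * fails the test.
                 */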
10969                 err = -EIO;
10970                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10971                         u8 hw8 = hweight8(data[i]);
10972
10973                         if ((hw8 & 0x1) && parity[i])
10974                                 goto out;
10975                         else if (!(hw8 & 0x1) && !parity[i])
10976                                 goto out;
10977                 }
10978                 err = 0;
10979                 goto out;
10980         }
10981
10982         err = -EIO;
10983
10984         /* Bootstrap checksum at offset 0x10 */
10985         csum = calc_crc((unsigned char *) buf, 0x10);
10986         if (csum != le32_to_cpu(buf[0x10/4]))
10987                 goto out;
10988
10989         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10990         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10991         if (csum != le32_to_cpu(buf[0xfc/4]))
10992                 goto out;
10993
10994         kfree(buf);
10995
10996         buf = tg3_vpd_readblock(tp, &len);
10997         if (!buf)
10998                 return -ENOMEM;
10999
11000         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11001         if (i > 0) {
11002                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11003                 if (j < 0)
11004                         goto out;
11005
11006                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11007                         goto out;
11008
11009                 i += PCI_VPD_LRDT_TAG_SIZE;
11010                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11011                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11012                 if (j > 0) {
11013                         u8 csum8 = 0;
11014
11015                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11016
11017                         for (i = 0; i <= j; i++)
11018                                 csum8 += ((u8 *)buf)[i];
11019
11020                         if (csum8)
11021                                 goto out;
11022                 }
11023         }
11024
11025         err = 0;
11026
11027 out:
11028         kfree(buf);
11029         return err;
11030 }
11031
11032 #define TG3_SERDES_TIMEOUT_SEC  2
11033 #define TG3_COPPER_TIMEOUT_SEC  6
11034
11035 static int tg3_test_link(struct tg3 *tp)
11036 {
11037         int i, max;
11038
11039         if (!netif_running(tp->dev))
11040                 return -ENODEV;
11041
11042         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11043                 max = TG3_SERDES_TIMEOUT_SEC;
11044         else
11045                 max = TG3_COPPER_TIMEOUT_SEC;
11046
11047         for (i = 0; i < max; i++) {
11048                 if (netif_carrier_ok(tp->dev))
11049                         return 0;
11050
11051                 if (msleep_interruptible(1000))
11052                         break;
11053         }
11054
11055         return -EIO;
11056 }
11057
11058 /* Only test the commonly used registers */
11059 static int tg3_test_registers(struct tg3 *tp)
11060 {
11061         int i, is_5705, is_5750;
11062         u32 offset, read_mask, write_mask, val, save_val, read_val;
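        /* For each entry, read_mask selects the bits expected to be
         * read-only and write_mask the read/write bits the test
         * exercises; flags restrict an entry to certain chip families.
         */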
11063         static struct {
11064                 u16 offset;
11065                 u16 flags;
11066 #define TG3_FL_5705     0x1
11067 #define TG3_FL_NOT_5705 0x2
11068 #define TG3_FL_NOT_5788 0x4
11069 #define TG3_FL_NOT_5750 0x8
11070                 u32 read_mask;
11071                 u32 write_mask;
11072         } reg_tbl[] = {
11073                 /* MAC Control Registers */
11074                 { MAC_MODE, TG3_FL_NOT_5705,
11075                         0x00000000, 0x00ef6f8c },
11076                 { MAC_MODE, TG3_FL_5705,
11077                         0x00000000, 0x01ef6b8c },
11078                 { MAC_STATUS, TG3_FL_NOT_5705,
11079                         0x03800107, 0x00000000 },
11080                 { MAC_STATUS, TG3_FL_5705,
11081                         0x03800100, 0x00000000 },
11082                 { MAC_ADDR_0_HIGH, 0x0000,
11083                         0x00000000, 0x0000ffff },
11084                 { MAC_ADDR_0_LOW, 0x0000,
11085                         0x00000000, 0xffffffff },
11086                 { MAC_RX_MTU_SIZE, 0x0000,
11087                         0x00000000, 0x0000ffff },
11088                 { MAC_TX_MODE, 0x0000,
11089                         0x00000000, 0x00000070 },
11090                 { MAC_TX_LENGTHS, 0x0000,
11091                         0x00000000, 0x00003fff },
11092                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11093                         0x00000000, 0x000007fc },
11094                 { MAC_RX_MODE, TG3_FL_5705,
11095                         0x00000000, 0x000007dc },
11096                 { MAC_HASH_REG_0, 0x0000,
11097                         0x00000000, 0xffffffff },
11098                 { MAC_HASH_REG_1, 0x0000,
11099                         0x00000000, 0xffffffff },
11100                 { MAC_HASH_REG_2, 0x0000,
11101                         0x00000000, 0xffffffff },
11102                 { MAC_HASH_REG_3, 0x0000,
11103                         0x00000000, 0xffffffff },
11104
11105                 /* Receive Data and Receive BD Initiator Control Registers. */
11106                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11107                         0x00000000, 0xffffffff },
11108                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11109                         0x00000000, 0xffffffff },
11110                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11111                         0x00000000, 0x00000003 },
11112                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11113                         0x00000000, 0xffffffff },
11114                 { RCVDBDI_STD_BD+0, 0x0000,
11115                         0x00000000, 0xffffffff },
11116                 { RCVDBDI_STD_BD+4, 0x0000,
11117                         0x00000000, 0xffffffff },
11118                 { RCVDBDI_STD_BD+8, 0x0000,
11119                         0x00000000, 0xffff0002 },
11120                 { RCVDBDI_STD_BD+0xc, 0x0000,
11121                         0x00000000, 0xffffffff },
11122
11123                 /* Receive BD Initiator Control Registers. */
11124                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11125                         0x00000000, 0xffffffff },
11126                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11127                         0x00000000, 0x000003ff },
11128                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11129                         0x00000000, 0xffffffff },
11130
11131                 /* Host Coalescing Control Registers. */
11132                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11133                         0x00000000, 0x00000004 },
11134                 { HOSTCC_MODE, TG3_FL_5705,
11135                         0x00000000, 0x000000f6 },
11136                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11137                         0x00000000, 0xffffffff },
11138                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11139                         0x00000000, 0x000003ff },
11140                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11141                         0x00000000, 0xffffffff },
11142                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11143                         0x00000000, 0x000003ff },
11144                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11145                         0x00000000, 0xffffffff },
11146                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11147                         0x00000000, 0x000000ff },
11148                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11149                         0x00000000, 0xffffffff },
11150                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11151                         0x00000000, 0x000000ff },
11152                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11153                         0x00000000, 0xffffffff },
11154                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11155                         0x00000000, 0xffffffff },
11156                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11157                         0x00000000, 0xffffffff },
11158                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11159                         0x00000000, 0x000000ff },
11160                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11161                         0x00000000, 0xffffffff },
11162                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11163                         0x00000000, 0x000000ff },
11164                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11165                         0x00000000, 0xffffffff },
11166                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11167                         0x00000000, 0xffffffff },
11168                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11169                         0x00000000, 0xffffffff },
11170                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11171                         0x00000000, 0xffffffff },
11172                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11173                         0x00000000, 0xffffffff },
11174                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11175                         0xffffffff, 0x00000000 },
11176                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11177                         0xffffffff, 0x00000000 },
11178
11179                 /* Buffer Manager Control Registers. */
11180                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11181                         0x00000000, 0x007fff80 },
11182                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11183                         0x00000000, 0x007fffff },
11184                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11185                         0x00000000, 0x0000003f },
11186                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11187                         0x00000000, 0x000001ff },
11188                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11189                         0x00000000, 0x000001ff },
11190                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11191                         0xffffffff, 0x00000000 },
11192                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11193                         0xffffffff, 0x00000000 },
11194
11195                 /* Mailbox Registers */
11196                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11197                         0x00000000, 0x000001ff },
11198                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11199                         0x00000000, 0x000001ff },
11200                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11201                         0x00000000, 0x000007ff },
11202                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11203                         0x00000000, 0x000001ff },
11204
11205                 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, /* end-of-table sentinel */
11206         };
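        /* Each entry's read_mask selects read-only bits whose value
         * must survive writes; write_mask selects read/write bits that
         * must accept both all-zeros and all-ones.  Worked example from
         * the table above: MAC_MODE on a 5705 has read_mask 0 and
         * write_mask 0x01ef6b8c, so after tw32(MAC_MODE, 0) every
         * write_mask bit must read back 0, and after
         * tw32(MAC_MODE, 0x01ef6b8c) every write_mask bit must read
         * back 1.  Bits outside both masks are ignored by the test.
         */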
11207
11208         is_5705 = is_5750 = 0;
11209         if (tg3_flag(tp, 5705_PLUS)) {
11210                 is_5705 = 1;
11211                 if (tg3_flag(tp, 5750_PLUS))
11212                         is_5750 = 1;
11213         }
11214
11215         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11216                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11217                         continue;
11218
11219                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11220                         continue;
11221
11222                 if (tg3_flag(tp, IS_5788) &&
11223                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11224                         continue;
11225
11226                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11227                         continue;
11228
11229                 offset = (u32) reg_tbl[i].offset;
11230                 read_mask = reg_tbl[i].read_mask;
11231                 write_mask = reg_tbl[i].write_mask;
11232
11233                 /* Save the original register content */
11234                 save_val = tr32(offset);
11235
11236                 /* Determine the read-only value. */
11237                 read_val = save_val & read_mask;
11238
11239                 /* Write zero to the register, then make sure the read-only bits
11240                  * are not changed and the read/write bits are all zeros.
11241                  */
11242                 tw32(offset, 0);
11243
11244                 val = tr32(offset);
11245
11246                 /* Test the read-only and read/write bits. */
11247                 if (((val & read_mask) != read_val) || (val & write_mask))
11248                         goto out;
11249
11250                 /* Write ones to all the bits defined by read_mask and
11251                  * write_mask, then make sure the read-only bits are not
11252                  * changed and the read/write bits are all ones.
11253                  */
11254                 tw32(offset, read_mask | write_mask);
11255
11256                 val = tr32(offset);
11257
11258                 /* Test the read-only bits. */
11259                 if ((val & read_mask) != read_val)
11260                         goto out;
11261
11262                 /* Test the read/write bits. */
11263                 if ((val & write_mask) != write_mask)
11264                         goto out;
11265
11266                 tw32(offset, save_val);
11267         }
11268
11269         return 0;
11270
11271 out:
11272         if (netif_msg_hw(tp))
11273                 netdev_err(tp->dev,
11274                            "Register test failed at offset %x\n", offset);
11275         tw32(offset, save_val);
11276         return -EIO;
11277 }
11278
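/* Walk a window of on-chip memory with three canned patterns (all
 * zeros, all ones, and the mixed pattern 0xaa55a55a), writing and
 * reading back one 32-bit word at a time; any mismatch fails the test.
 */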
11279 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11280 {
11281         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11282         int i;
11283         u32 j;
11284
11285         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11286                 for (j = 0; j < len; j += 4) {
11287                         u32 val;
11288
11289                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11290                         tg3_read_mem(tp, offset + j, &val);
11291                         if (val != test_pattern[i])
11292                                 return -EIO;
11293                 }
11294         }
11295         return 0;
11296 }
11297
11298 static int tg3_test_memory(struct tg3 *tp)
11299 {
11300         static struct mem_entry {
11301                 u32 offset;
11302                 u32 len;
11303         } mem_tbl_570x[] = {
11304                 { 0x00000000, 0x00b50},
11305                 { 0x00002000, 0x1c000},
11306                 { 0xffffffff, 0x00000}
11307         }, mem_tbl_5705[] = {
11308                 { 0x00000100, 0x0000c},
11309                 { 0x00000200, 0x00008},
11310                 { 0x00004000, 0x00800},
11311                 { 0x00006000, 0x01000},
11312                 { 0x00008000, 0x02000},
11313                 { 0x00010000, 0x0e000},
11314                 { 0xffffffff, 0x00000}
11315         }, mem_tbl_5755[] = {
11316                 { 0x00000200, 0x00008},
11317                 { 0x00004000, 0x00800},
11318                 { 0x00006000, 0x00800},
11319                 { 0x00008000, 0x02000},
11320                 { 0x00010000, 0x0c000},
11321                 { 0xffffffff, 0x00000}
11322         }, mem_tbl_5906[] = {
11323                 { 0x00000200, 0x00008},
11324                 { 0x00004000, 0x00400},
11325                 { 0x00006000, 0x00400},
11326                 { 0x00008000, 0x01000},
11327                 { 0x00010000, 0x01000},
11328                 { 0xffffffff, 0x00000}
11329         }, mem_tbl_5717[] = {
11330                 { 0x00000200, 0x00008},
11331                 { 0x00010000, 0x0a000},
11332                 { 0x00020000, 0x13c00},
11333                 { 0xffffffff, 0x00000}
11334         }, mem_tbl_57765[] = {
11335                 { 0x00000200, 0x00008},
11336                 { 0x00004000, 0x00800},
11337                 { 0x00006000, 0x09800},
11338                 { 0x00010000, 0x0a000},
11339                 { 0xffffffff, 0x00000}
11340         };
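        /* Each table lists { offset, len } windows of internal SRAM to
         * exercise; an offset of 0xffffffff terminates the table.  The
         * matching table is chosen below by ASIC generation.
         */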
11341         struct mem_entry *mem_tbl;
11342         int err = 0;
11343         int i;
11344
11345         if (tg3_flag(tp, 5717_PLUS))
11346                 mem_tbl = mem_tbl_5717;
11347         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11348                 mem_tbl = mem_tbl_57765;
11349         else if (tg3_flag(tp, 5755_PLUS))
11350                 mem_tbl = mem_tbl_5755;
11351         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11352                 mem_tbl = mem_tbl_5906;
11353         else if (tg3_flag(tp, 5705_PLUS))
11354                 mem_tbl = mem_tbl_5705;
11355         else
11356                 mem_tbl = mem_tbl_570x;
11357
11358         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11359                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11360                 if (err)
11361                         break;
11362         }
11363
11364         return err;
11365 }
11366
11367 #define TG3_TSO_MSS             500
11368
11369 #define TG3_TSO_IP_HDR_LEN      20
11370 #define TG3_TSO_TCP_HDR_LEN     20
11371 #define TG3_TSO_TCP_OPT_LEN     12
11372
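/* Canned TSO test frame, copied in just past the two MAC addresses
 * (see tg3_run_loopback below): IPv4 ethertype, a 20-byte IP header,
 * and a 32-byte TCP header (20 bytes plus 12 bytes of options).
 */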
11373 static const u8 tg3_tso_header[] = {
11374 0x08, 0x00,                             /* ethertype: IPv4 (0x0800) */
11375 0x45, 0x00, 0x00, 0x00,                 /* IP: ver 4, ihl 5, tos 0, tot_len (set at runtime) */
11376 0x00, 0x00, 0x40, 0x00,                 /* IP: id 0, flags DF, frag offset 0 */
11377 0x40, 0x06, 0x00, 0x00,                 /* IP: ttl 64, proto TCP, csum 0 */
11378 0x0a, 0x00, 0x00, 0x01,                 /* IP: saddr 10.0.0.1 */
11379 0x0a, 0x00, 0x00, 0x02,                 /* IP: daddr 10.0.0.2 */
11380 0x0d, 0x00, 0xe0, 0x00,                 /* TCP: sport 0x0d00, dport 0xe000 */
11381 0x00, 0x00, 0x01, 0x00,                 /* TCP: seq 0x00000100 */
11382 0x00, 0x00, 0x02, 0x00,                 /* TCP: ack 0x00000200 */
11383 0x80, 0x10, 0x10, 0x00,                 /* TCP: doff 8 (32 bytes), flags ACK, win 0x1000 */
11384 0x14, 0x09, 0x00, 0x00,                 /* TCP: csum 0x1409, urg 0 */
11385 0x01, 0x01, 0x08, 0x0a,                 /* TCP opts: NOP, NOP, timestamp (kind 8, len 10) */
11386 0x11, 0x11, 0x11, 0x11,                 /* TCP opts: timestamp value */
11387 0x11, 0x11, 0x11, 0x11,                 /* TCP opts: timestamp echo reply */
11388 };
11389
11390 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11391 {
11392         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11393         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11394         u32 budget;
11395         struct sk_buff *skb, *rx_skb;
11396         u8 *tx_data;
11397         dma_addr_t map;
11398         int num_pkts, tx_len, rx_len, i, err;
11399         struct tg3_rx_buffer_desc *desc;
11400         struct tg3_napi *tnapi, *rnapi;
11401         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11402
11403         tnapi = &tp->napi[0];
11404         rnapi = &tp->napi[0];
11405         if (tp->irq_cnt > 1) {
11406                 if (tg3_flag(tp, ENABLE_RSS))
11407                         rnapi = &tp->napi[1];
11408                 if (tg3_flag(tp, ENABLE_TSS))
11409                         tnapi = &tp->napi[1];
11410         }
11411         coal_now = tnapi->coal_now | rnapi->coal_now;
11412
11413         err = -EIO;
11414
11415         tx_len = pktsz;
11416         skb = netdev_alloc_skb(tp->dev, tx_len);
11417         if (!skb)
11418                 return -ENOMEM;
11419
11420         tx_data = skb_put(skb, tx_len);
11421         memcpy(tx_data, tp->dev->dev_addr, 6); /* dest MAC: our own address */
11422         memset(tx_data + 6, 0x0, 8);           /* zero src MAC and ethertype */
11423
11424         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11425
11426         if (tso_loopback) {
11427                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11428
11429                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11430                               TG3_TSO_TCP_OPT_LEN;
11431
11432                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11433                        sizeof(tg3_tso_header));
11434                 mss = TG3_TSO_MSS;
11435
11436                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11437                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11438
11439                 /* Set the total length field in the IP header */
11440                 iph->tot_len = htons((u16)(mss + hdr_len));
11441
11442                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11443                               TXD_FLAG_CPU_POST_DMA);
11444
11445                 if (tg3_flag(tp, HW_TSO_1) ||
11446                     tg3_flag(tp, HW_TSO_2) ||
11447                     tg3_flag(tp, HW_TSO_3)) {
11448                         struct tcphdr *th;
11449                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11450                         th = (struct tcphdr *)&tx_data[val];
11451                         th->check = 0;
11452                 } else
11453                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11454
11455                 if (tg3_flag(tp, HW_TSO_3)) {
11456                         mss |= (hdr_len & 0xc) << 12;
11457                         if (hdr_len & 0x10)
11458                                 base_flags |= 0x00000010;
11459                         base_flags |= (hdr_len & 0x3e0) << 5;
11460                 } else if (tg3_flag(tp, HW_TSO_2))
11461                         mss |= hdr_len << 9;
11462                 else if (tg3_flag(tp, HW_TSO_1) ||
11463                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11464                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11465                 } else {
11466                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11467                 }
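                /* Worked example of the header-length encoding above,
                 * assuming the canned header: hdr_len = 20 + 20 + 12 =
                 * 52 (0x34).  For HW_TSO_3 that becomes
                 * mss |= (0x34 & 0xc) << 12 = 0x4000, base_flags |= 0x10
                 * (bit 4 of hdr_len is set), and
                 * base_flags |= (0x34 & 0x3e0) << 5 = 0x400.  For
                 * HW_TSO_2 it is simply mss |= 52 << 9.
                 */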
11468
11469                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11470         } else {
11471                 num_pkts = 1;
11472                 data_off = ETH_HLEN;
11473         }
11474
11475         for (i = data_off; i < tx_len; i++)
11476                 tx_data[i] = (u8) (i & 0xff);
11477
11478         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11479         if (pci_dma_mapping_error(tp->pdev, map)) {
11480                 dev_kfree_skb(skb);
11481                 return -EIO;
11482         }
11483
11484         val = tnapi->tx_prod;
11485         tnapi->tx_buffers[val].skb = skb;
11486         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11487
11488         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11489                rnapi->coal_now);
11490
11491         udelay(10);
11492
11493         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11494
11495         budget = tg3_tx_avail(tnapi);
11496         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11497                             base_flags | TXD_FLAG_END, mss, 0)) {
11498                 tnapi->tx_buffers[val].skb = NULL;
11499                 dev_kfree_skb(skb);
11500                 return -EIO;
11501         }
11502
11503         tnapi->tx_prod++;
11504
11505         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11506         tr32_mailbox(tnapi->prodmbox);
11507
11508         udelay(10);
11509
11510         /* Poll up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices.  */
11511         for (i = 0; i < 35; i++) {
11512                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11513                        coal_now);
11514
11515                 udelay(10);
11516
11517                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11518                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11519                 if ((tx_idx == tnapi->tx_prod) &&
11520                     (rx_idx == (rx_start_idx + num_pkts)))
11521                         break;
11522         }
11523
11524         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11525         dev_kfree_skb(skb);
11526
11527         if (tx_idx != tnapi->tx_prod)
11528                 goto out;
11529
11530         if (rx_idx != rx_start_idx + num_pkts)
11531                 goto out;
11532
11533         val = data_off;
11534         while (rx_idx != rx_start_idx) {
11535                 desc = &rnapi->rx_rcb[rx_start_idx++];
11536                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11537                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11538
11539                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11540                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11541                         goto out;
11542
11543                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11544                          - ETH_FCS_LEN;
11545
11546                 if (!tso_loopback) {
11547                         if (rx_len != tx_len)
11548                                 goto out;
11549
11550                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11551                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11552                                         goto out;
11553                         } else {
11554                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11555                                         goto out;
11556                         }
11557                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11558                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11559                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11560                         goto out;
11561                 }
11562
11563                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11564                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11565                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11566                                              mapping);
11567                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11568                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11569                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11570                                              mapping);
11571                 } else
11572                         goto out;
11573
11574                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11575                                             PCI_DMA_FROMDEVICE);
11576
11577                 for (i = data_off; i < rx_len; i++, val++) {
11578                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11579                                 goto out;
11580                 }
11581         }
11582
11583         err = 0;
11584
11585         /* tg3_free_rings will unmap and free the rx_skb */
11586 out:
11587         return err;
11588 }
11589
11590 #define TG3_STD_LOOPBACK_FAILED         1
11591 #define TG3_JMB_LOOPBACK_FAILED         2
11592 #define TG3_TSO_LOOPBACK_FAILED         4
11593 #define TG3_LOOPBACK_FAILED \
11594         (TG3_STD_LOOPBACK_FAILED | \
11595          TG3_JMB_LOOPBACK_FAILED | \
11596          TG3_TSO_LOOPBACK_FAILED)
11597
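/* Loopback results land in three u64 slots: data[0] for MAC loopback,
 * data[1] for internal PHY loopback, and data[2] for external loopback
 * (only when requested), each holding the failure bits defined above.
 */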
11598 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11599 {
11600         int err = -EIO;
11601         u32 eee_cap;
11602
11603         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11604         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11605
11606         if (!netif_running(tp->dev)) {
11607                 data[0] = TG3_LOOPBACK_FAILED;
11608                 data[1] = TG3_LOOPBACK_FAILED;
11609                 if (do_extlpbk)
11610                         data[2] = TG3_LOOPBACK_FAILED;
11611                 goto done;
11612         }
11613
11614         err = tg3_reset_hw(tp, 1);
11615         if (err) {
11616                 data[0] = TG3_LOOPBACK_FAILED;
11617                 data[1] = TG3_LOOPBACK_FAILED;
11618                 if (do_extlpbk)
11619                         data[2] = TG3_LOOPBACK_FAILED;
11620                 goto done;
11621         }
11622
11623         if (tg3_flag(tp, ENABLE_RSS)) {
11624                 int i;
11625
11626                 /* Reroute all rx packets to the 1st queue */
11627                 for (i = MAC_RSS_INDIR_TBL_0;
11628                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11629                         tw32(i, 0x0);
11630         }
11631
11632         /* HW errata - MAC loopback fails in some cases on 5780.
11633          * Normal traffic and PHY loopback are not affected by
11634          * this errata.  Also, the MAC loopback test is deprecated for
11635          * all newer ASIC revisions.
11636          */
11637         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11638             !tg3_flag(tp, CPMU_PRESENT)) {
11639                 tg3_mac_loopback(tp, true);
11640
11641                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11642                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11643
11644                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11645                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11646                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11647
11648                 tg3_mac_loopback(tp, false);
11649         }
11650
11651         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11652             !tg3_flag(tp, USE_PHYLIB)) {
11653                 int i;
11654
11655                 tg3_phy_lpbk_set(tp, 0, false);
11656
11657                 /* Wait for link */
11658                 for (i = 0; i < 100; i++) {
11659                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11660                                 break;
11661                         mdelay(1);
11662                 }
11663
11664                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11665                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11666                 if (tg3_flag(tp, TSO_CAPABLE) &&
11667                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11668                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11669                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11670                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11671                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11672
11673                 if (do_extlpbk) {
11674                         tg3_phy_lpbk_set(tp, 0, true);
11675
11676                         /* All link indications report up, but the hardware
11677                          * isn't really ready for about 20 msec.  Double it
11678                          * to be sure.
11679                          */
11680                         mdelay(40);
11681
11682                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11683                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11684                         if (tg3_flag(tp, TSO_CAPABLE) &&
11685                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11686                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11687                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11688                             tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11689                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11690                 }
11691
11692                 /* Re-enable gphy autopowerdown. */
11693                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11694                         tg3_phy_toggle_apd(tp, true);
11695         }
11696
11697         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11698
11699 done:
11700         tp->phy_flags |= eee_cap;
11701
11702         return err;
11703 }
11704
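/* ethtool self-test results, in order: data[0] nvram, data[1] link,
 * data[2] registers, data[3] memory, data[4..6] the three loopback
 * words above, data[7] interrupt.
 */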
11705 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11706                           u64 *data)
11707 {
11708         struct tg3 *tp = netdev_priv(dev);
11709         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11710
11711         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11712             tg3_power_up(tp)) {
11713                 etest->flags |= ETH_TEST_FL_FAILED;
11714                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); /* non-zero marks every test failed */
11715                 return;
11716         }
11717
11718         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11719
11720         if (tg3_test_nvram(tp) != 0) {
11721                 etest->flags |= ETH_TEST_FL_FAILED;
11722                 data[0] = 1;
11723         }
11724         if (!doextlpbk && tg3_test_link(tp)) {
11725                 etest->flags |= ETH_TEST_FL_FAILED;
11726                 data[1] = 1;
11727         }
11728         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11729                 int err, err2 = 0, irq_sync = 0;
11730
11731                 if (netif_running(dev)) {
11732                         tg3_phy_stop(tp);
11733                         tg3_netif_stop(tp);
11734                         irq_sync = 1;
11735                 }
11736
11737                 tg3_full_lock(tp, irq_sync);
11738
11739                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11740                 err = tg3_nvram_lock(tp);
11741                 tg3_halt_cpu(tp, RX_CPU_BASE);
11742                 if (!tg3_flag(tp, 5705_PLUS))
11743                         tg3_halt_cpu(tp, TX_CPU_BASE);
11744                 if (!err)
11745                         tg3_nvram_unlock(tp);
11746
11747                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11748                         tg3_phy_reset(tp);
11749
11750                 if (tg3_test_registers(tp) != 0) {
11751                         etest->flags |= ETH_TEST_FL_FAILED;
11752                         data[2] = 1;
11753                 }
11754
11755                 if (tg3_test_memory(tp) != 0) {
11756                         etest->flags |= ETH_TEST_FL_FAILED;
11757                         data[3] = 1;
11758                 }
11759
11760                 if (doextlpbk)
11761                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11762
11763                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11764                         etest->flags |= ETH_TEST_FL_FAILED;
11765
11766                 tg3_full_unlock(tp);
11767
11768                 if (tg3_test_interrupt(tp) != 0) {
11769                         etest->flags |= ETH_TEST_FL_FAILED;
11770                         data[7] = 1;
11771                 }
11772
11773                 tg3_full_lock(tp, 0);
11774
11775                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11776                 if (netif_running(dev)) {
11777                         tg3_flag_set(tp, INIT_COMPLETE);
11778                         err2 = tg3_restart_hw(tp, 1);
11779                         if (!err2)
11780                                 tg3_netif_start(tp);
11781                 }
11782
11783                 tg3_full_unlock(tp);
11784
11785                 if (irq_sync && !err2)
11786                         tg3_phy_start(tp);
11787         }
11788         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11789                 tg3_power_down(tp);
11790
11791 }
11792
11793 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11794 {
11795         struct mii_ioctl_data *data = if_mii(ifr);
11796         struct tg3 *tp = netdev_priv(dev);
11797         int err;
11798
11799         if (tg3_flag(tp, USE_PHYLIB)) {
11800                 struct phy_device *phydev;
11801                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11802                         return -EAGAIN;
11803                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11804                 return phy_mii_ioctl(phydev, ifr, cmd);
11805         }
11806
11807         switch (cmd) {
11808         case SIOCGMIIPHY:
11809                 data->phy_id = tp->phy_addr;
11810
11811                 /* fallthru */
11812         case SIOCGMIIREG: {
11813                 u32 mii_regval;
11814
11815                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11816                         break;                  /* We have no PHY */
11817
11818                 if (!netif_running(dev))
11819                         return -EAGAIN;
11820
11821                 spin_lock_bh(&tp->lock);
11822                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11823                 spin_unlock_bh(&tp->lock);
11824
11825                 data->val_out = mii_regval;
11826
11827                 return err;
11828         }
11829
11830         case SIOCSMIIREG:
11831                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11832                         break;                  /* We have no PHY */
11833
11834                 if (!netif_running(dev))
11835                         return -EAGAIN;
11836
11837                 spin_lock_bh(&tp->lock);
11838                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11839                 spin_unlock_bh(&tp->lock);
11840
11841                 return err;
11842
11843         default:
11844                 /* do nothing */
11845                 break;
11846         }
11847         return -EOPNOTSUPP;
11848 }
11849
11850 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11851 {
11852         struct tg3 *tp = netdev_priv(dev);
11853
11854         memcpy(ec, &tp->coal, sizeof(*ec));
11855         return 0;
11856 }
11857
11858 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11859 {
11860         struct tg3 *tp = netdev_priv(dev);
11861         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11862         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11863
11864         if (!tg3_flag(tp, 5705_PLUS)) {
11865                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11866                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11867                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11868                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11869         }
11870
11871         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11872             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11873             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11874             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11875             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11876             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11877             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11878             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11879             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11880             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11881                 return -EINVAL;
11882
11883         /* No rx interrupts will be generated if both are zero */
11884         if ((ec->rx_coalesce_usecs == 0) &&
11885             (ec->rx_max_coalesced_frames == 0))
11886                 return -EINVAL;
11887
11888         /* No tx interrupts will be generated if both are zero */
11889         if ((ec->tx_coalesce_usecs == 0) &&
11890             (ec->tx_max_coalesced_frames == 0))
11891                 return -EINVAL;
11892
11893         /* Only copy relevant parameters, ignore all others. */
11894         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11895         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11896         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11897         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11898         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11899         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11900         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11901         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11902         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11903
11904         if (netif_running(dev)) {
11905                 tg3_full_lock(tp, 0);
11906                 __tg3_set_coalesce(tp, &tp->coal);
11907                 tg3_full_unlock(tp);
11908         }
11909         return 0;
11910 }
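/* For reference, these fields map straight onto the ethtool -C knobs,
 * e.g. "ethtool -C eth0 rx-usecs 20 rx-frames 5" sets
 * rx_coalesce_usecs and rx_max_coalesced_frames above.
 */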
11911
11912 static const struct ethtool_ops tg3_ethtool_ops = {
11913         .get_settings           = tg3_get_settings,
11914         .set_settings           = tg3_set_settings,
11915         .get_drvinfo            = tg3_get_drvinfo,
11916         .get_regs_len           = tg3_get_regs_len,
11917         .get_regs               = tg3_get_regs,
11918         .get_wol                = tg3_get_wol,
11919         .set_wol                = tg3_set_wol,
11920         .get_msglevel           = tg3_get_msglevel,
11921         .set_msglevel           = tg3_set_msglevel,
11922         .nway_reset             = tg3_nway_reset,
11923         .get_link               = ethtool_op_get_link,
11924         .get_eeprom_len         = tg3_get_eeprom_len,
11925         .get_eeprom             = tg3_get_eeprom,
11926         .set_eeprom             = tg3_set_eeprom,
11927         .get_ringparam          = tg3_get_ringparam,
11928         .set_ringparam          = tg3_set_ringparam,
11929         .get_pauseparam         = tg3_get_pauseparam,
11930         .set_pauseparam         = tg3_set_pauseparam,
11931         .self_test              = tg3_self_test,
11932         .get_strings            = tg3_get_strings,
11933         .set_phys_id            = tg3_set_phys_id,
11934         .get_ethtool_stats      = tg3_get_ethtool_stats,
11935         .get_coalesce           = tg3_get_coalesce,
11936         .set_coalesce           = tg3_set_coalesce,
11937         .get_sset_count         = tg3_get_sset_count,
11938 };
11939
11940 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11941 {
11942         u32 cursize, val, magic;
11943
11944         tp->nvram_size = EEPROM_CHIP_SIZE;
11945
11946         if (tg3_nvram_read(tp, 0, &magic) != 0)
11947                 return;
11948
11949         if ((magic != TG3_EEPROM_MAGIC) &&
11950             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11951             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11952                 return;
11953
11954         /*
11955          * Size the chip by reading offsets at increasing powers of two.
11956          * When we encounter our validation signature, we know the addressing
11957          * has wrapped around, and thus have our chip size.
11958          */
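        /* Example: assuming a 512-byte part that ignores high address
         * bits, reads at 0x10, 0x20, ... return ordinary data until the
         * read at 0x200 wraps back to offset 0 and returns the magic
         * value, so cursize (and thus nvram_size) ends up at 0x200.
         */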
11959         cursize = 0x10;
11960
11961         while (cursize < tp->nvram_size) {
11962                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11963                         return;
11964
11965                 if (val == magic)
11966                         break;
11967
11968                 cursize <<= 1;
11969         }
11970
11971         tp->nvram_size = cursize;
11972 }
11973
11974 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11975 {
11976         u32 val;
11977
11978         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11979                 return;
11980
11981         /* Selfboot format */
11982         if (val != TG3_EEPROM_MAGIC) {
11983                 tg3_get_eeprom_size(tp);
11984                 return;
11985         }
11986
11987         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11988                 if (val != 0) {
11989                         /* This is confusing.  We want to operate on the
11990                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11991                          * call will read from NVRAM and byteswap the data
11992                          * according to the byteswapping settings for all
11993                          * other register accesses.  This ensures the data we
11994                          * want will always reside in the lower 16-bits.
11995                          * However, the data in NVRAM is in LE format, which
11996                          * means the data from the NVRAM read will always be
11997                          * opposite the endianness of the CPU.  The 16-bit
11998                          * byteswap then brings the data to CPU endianness.
11999                          */
12000                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12001                         return;
12002                 }
12003         }
12004         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12005 }
12006
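/* The tg3_get_*_nvram_info() routines below all follow one pattern:
 * decode the NVRAM_CFG1 strapping to identify the part, then record
 * the JEDEC vendor, page size, and the BUFFERED/FLASH/NO_NVRAM flags
 * that the NVRAM access routines key off of.
 */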
12007 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12008 {
12009         u32 nvcfg1;
12010
12011         nvcfg1 = tr32(NVRAM_CFG1);
12012         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12013                 tg3_flag_set(tp, FLASH);
12014         } else {
12015                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12016                 tw32(NVRAM_CFG1, nvcfg1);
12017         }
12018
12019         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12020             tg3_flag(tp, 5780_CLASS)) {
12021                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12022                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12023                         tp->nvram_jedecnum = JEDEC_ATMEL;
12024                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12025                         tg3_flag_set(tp, NVRAM_BUFFERED);
12026                         break;
12027                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12028                         tp->nvram_jedecnum = JEDEC_ATMEL;
12029                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12030                         break;
12031                 case FLASH_VENDOR_ATMEL_EEPROM:
12032                         tp->nvram_jedecnum = JEDEC_ATMEL;
12033                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12034                         tg3_flag_set(tp, NVRAM_BUFFERED);
12035                         break;
12036                 case FLASH_VENDOR_ST:
12037                         tp->nvram_jedecnum = JEDEC_ST;
12038                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12039                         tg3_flag_set(tp, NVRAM_BUFFERED);
12040                         break;
12041                 case FLASH_VENDOR_SAIFUN:
12042                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12043                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12044                         break;
12045                 case FLASH_VENDOR_SST_SMALL:
12046                 case FLASH_VENDOR_SST_LARGE:
12047                         tp->nvram_jedecnum = JEDEC_SST;
12048                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12049                         break;
12050                 }
12051         } else {
12052                 tp->nvram_jedecnum = JEDEC_ATMEL;
12053                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12054                 tg3_flag_set(tp, NVRAM_BUFFERED);
12055         }
12056 }
12057
12058 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12059 {
12060         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12061         case FLASH_5752PAGE_SIZE_256:
12062                 tp->nvram_pagesize = 256;
12063                 break;
12064         case FLASH_5752PAGE_SIZE_512:
12065                 tp->nvram_pagesize = 512;
12066                 break;
12067         case FLASH_5752PAGE_SIZE_1K:
12068                 tp->nvram_pagesize = 1024;
12069                 break;
12070         case FLASH_5752PAGE_SIZE_2K:
12071                 tp->nvram_pagesize = 2048;
12072                 break;
12073         case FLASH_5752PAGE_SIZE_4K:
12074                 tp->nvram_pagesize = 4096;
12075                 break;
12076         case FLASH_5752PAGE_SIZE_264:
12077                 tp->nvram_pagesize = 264;
12078                 break;
12079         case FLASH_5752PAGE_SIZE_528:
12080                 tp->nvram_pagesize = 528;
12081                 break;
12082         }
12083 }
12084
12085 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12086 {
12087         u32 nvcfg1;
12088
12089         nvcfg1 = tr32(NVRAM_CFG1);
12090
12091         /* NVRAM protection for TPM */
12092         if (nvcfg1 & (1 << 27))
12093                 tg3_flag_set(tp, PROTECTED_NVRAM);
12094
12095         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12096         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12097         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12098                 tp->nvram_jedecnum = JEDEC_ATMEL;
12099                 tg3_flag_set(tp, NVRAM_BUFFERED);
12100                 break;
12101         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12102                 tp->nvram_jedecnum = JEDEC_ATMEL;
12103                 tg3_flag_set(tp, NVRAM_BUFFERED);
12104                 tg3_flag_set(tp, FLASH);
12105                 break;
12106         case FLASH_5752VENDOR_ST_M45PE10:
12107         case FLASH_5752VENDOR_ST_M45PE20:
12108         case FLASH_5752VENDOR_ST_M45PE40:
12109                 tp->nvram_jedecnum = JEDEC_ST;
12110                 tg3_flag_set(tp, NVRAM_BUFFERED);
12111                 tg3_flag_set(tp, FLASH);
12112                 break;
12113         }
12114
12115         if (tg3_flag(tp, FLASH)) {
12116                 tg3_nvram_get_pagesize(tp, nvcfg1);
12117         } else {
12118                 /* For eeprom, set pagesize to maximum eeprom size */
12119                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12120
12121                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12122                 tw32(NVRAM_CFG1, nvcfg1);
12123         }
12124 }
12125
12126 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12127 {
12128         u32 nvcfg1, protect = 0;
12129
12130         nvcfg1 = tr32(NVRAM_CFG1);
12131
12132         /* NVRAM protection for TPM */
12133         if (nvcfg1 & (1 << 27)) {
12134                 tg3_flag_set(tp, PROTECTED_NVRAM);
12135                 protect = 1;
12136         }
12137
12138         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12139         switch (nvcfg1) {
12140         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12141         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12142         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12143         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12144                 tp->nvram_jedecnum = JEDEC_ATMEL;
12145                 tg3_flag_set(tp, NVRAM_BUFFERED);
12146                 tg3_flag_set(tp, FLASH);
12147                 tp->nvram_pagesize = 264;
12148                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12149                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12150                         tp->nvram_size = (protect ? 0x3e200 :
12151                                           TG3_NVRAM_SIZE_512KB);
12152                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12153                         tp->nvram_size = (protect ? 0x1f200 :
12154                                           TG3_NVRAM_SIZE_256KB);
12155                 else
12156                         tp->nvram_size = (protect ? 0x1f200 :
12157                                           TG3_NVRAM_SIZE_128KB);
12158                 break;
12159         case FLASH_5752VENDOR_ST_M45PE10:
12160         case FLASH_5752VENDOR_ST_M45PE20:
12161         case FLASH_5752VENDOR_ST_M45PE40:
12162                 tp->nvram_jedecnum = JEDEC_ST;
12163                 tg3_flag_set(tp, NVRAM_BUFFERED);
12164                 tg3_flag_set(tp, FLASH);
12165                 tp->nvram_pagesize = 256;
12166                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12167                         tp->nvram_size = (protect ?
12168                                           TG3_NVRAM_SIZE_64KB :
12169                                           TG3_NVRAM_SIZE_128KB);
12170                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12171                         tp->nvram_size = (protect ?
12172                                           TG3_NVRAM_SIZE_64KB :
12173                                           TG3_NVRAM_SIZE_256KB);
12174                 else
12175                         tp->nvram_size = (protect ?
12176                                           TG3_NVRAM_SIZE_128KB :
12177                                           TG3_NVRAM_SIZE_512KB);
12178                 break;
12179         }
12180 }
12181
12182 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12183 {
12184         u32 nvcfg1;
12185
12186         nvcfg1 = tr32(NVRAM_CFG1);
12187
12188         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12189         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12190         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12191         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12192         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12193                 tp->nvram_jedecnum = JEDEC_ATMEL;
12194                 tg3_flag_set(tp, NVRAM_BUFFERED);
12195                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12196
12197                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12198                 tw32(NVRAM_CFG1, nvcfg1);
12199                 break;
12200         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12201         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12202         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12203         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12204                 tp->nvram_jedecnum = JEDEC_ATMEL;
12205                 tg3_flag_set(tp, NVRAM_BUFFERED);
12206                 tg3_flag_set(tp, FLASH);
12207                 tp->nvram_pagesize = 264;
12208                 break;
12209         case FLASH_5752VENDOR_ST_M45PE10:
12210         case FLASH_5752VENDOR_ST_M45PE20:
12211         case FLASH_5752VENDOR_ST_M45PE40:
12212                 tp->nvram_jedecnum = JEDEC_ST;
12213                 tg3_flag_set(tp, NVRAM_BUFFERED);
12214                 tg3_flag_set(tp, FLASH);
12215                 tp->nvram_pagesize = 256;
12216                 break;
12217         }
12218 }
12219
12220 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12221 {
12222         u32 nvcfg1, protect = 0;
12223
12224         nvcfg1 = tr32(NVRAM_CFG1);
12225
12226         /* NVRAM protection for TPM */
12227         if (nvcfg1 & (1 << 27)) {
12228                 tg3_flag_set(tp, PROTECTED_NVRAM);
12229                 protect = 1;
12230         }
12231
12232         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12233         switch (nvcfg1) {
12234         case FLASH_5761VENDOR_ATMEL_ADB021D:
12235         case FLASH_5761VENDOR_ATMEL_ADB041D:
12236         case FLASH_5761VENDOR_ATMEL_ADB081D:
12237         case FLASH_5761VENDOR_ATMEL_ADB161D:
12238         case FLASH_5761VENDOR_ATMEL_MDB021D:
12239         case FLASH_5761VENDOR_ATMEL_MDB041D:
12240         case FLASH_5761VENDOR_ATMEL_MDB081D:
12241         case FLASH_5761VENDOR_ATMEL_MDB161D:
12242                 tp->nvram_jedecnum = JEDEC_ATMEL;
12243                 tg3_flag_set(tp, NVRAM_BUFFERED);
12244                 tg3_flag_set(tp, FLASH);
12245                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12246                 tp->nvram_pagesize = 256;
12247                 break;
12248         case FLASH_5761VENDOR_ST_A_M45PE20:
12249         case FLASH_5761VENDOR_ST_A_M45PE40:
12250         case FLASH_5761VENDOR_ST_A_M45PE80:
12251         case FLASH_5761VENDOR_ST_A_M45PE16:
12252         case FLASH_5761VENDOR_ST_M_M45PE20:
12253         case FLASH_5761VENDOR_ST_M_M45PE40:
12254         case FLASH_5761VENDOR_ST_M_M45PE80:
12255         case FLASH_5761VENDOR_ST_M_M45PE16:
12256                 tp->nvram_jedecnum = JEDEC_ST;
12257                 tg3_flag_set(tp, NVRAM_BUFFERED);
12258                 tg3_flag_set(tp, FLASH);
12259                 tp->nvram_pagesize = 256;
12260                 break;
12261         }
12262
12263         if (protect) {
12264                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12265         } else {
12266                 switch (nvcfg1) {
12267                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12268                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12269                 case FLASH_5761VENDOR_ST_A_M45PE16:
12270                 case FLASH_5761VENDOR_ST_M_M45PE16:
12271                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12272                         break;
12273                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12274                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12275                 case FLASH_5761VENDOR_ST_A_M45PE80:
12276                 case FLASH_5761VENDOR_ST_M_M45PE80:
12277                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12278                         break;
12279                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12280                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12281                 case FLASH_5761VENDOR_ST_A_M45PE40:
12282                 case FLASH_5761VENDOR_ST_M_M45PE40:
12283                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12284                         break;
12285                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12286                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12287                 case FLASH_5761VENDOR_ST_A_M45PE20:
12288                 case FLASH_5761VENDOR_ST_M_M45PE20:
12289                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12290                         break;
12291                 }
12292         }
12293 }
12294
12295 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12296 {
12297         tp->nvram_jedecnum = JEDEC_ATMEL;
12298         tg3_flag_set(tp, NVRAM_BUFFERED);
12299         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12300 }
12301
12302 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12303 {
12304         u32 nvcfg1;
12305
12306         nvcfg1 = tr32(NVRAM_CFG1);
12307
12308         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12309         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12310         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12311                 tp->nvram_jedecnum = JEDEC_ATMEL;
12312                 tg3_flag_set(tp, NVRAM_BUFFERED);
12313                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12314
12315                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12316                 tw32(NVRAM_CFG1, nvcfg1);
12317                 return;
12318         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12319         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12320         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12321         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12322         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12323         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12324         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12325                 tp->nvram_jedecnum = JEDEC_ATMEL;
12326                 tg3_flag_set(tp, NVRAM_BUFFERED);
12327                 tg3_flag_set(tp, FLASH);
12328
12329                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12330                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12331                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12332                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12333                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12334                         break;
12335                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12336                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12337                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12338                         break;
12339                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12340                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12341                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12342                         break;
12343                 }
12344                 break;
12345         case FLASH_5752VENDOR_ST_M45PE10:
12346         case FLASH_5752VENDOR_ST_M45PE20:
12347         case FLASH_5752VENDOR_ST_M45PE40:
12348                 tp->nvram_jedecnum = JEDEC_ST;
12349                 tg3_flag_set(tp, NVRAM_BUFFERED);
12350                 tg3_flag_set(tp, FLASH);
12351
12352                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12353                 case FLASH_5752VENDOR_ST_M45PE10:
12354                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12355                         break;
12356                 case FLASH_5752VENDOR_ST_M45PE20:
12357                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12358                         break;
12359                 case FLASH_5752VENDOR_ST_M45PE40:
12360                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12361                         break;
12362                 }
12363                 break;
12364         default:
12365                 tg3_flag_set(tp, NO_NVRAM);
12366                 return;
12367         }
12368
12369         tg3_nvram_get_pagesize(tp, nvcfg1);
12370         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12371                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12372 }
12373
12374
12375 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12376 {
12377         u32 nvcfg1;
12378
12379         nvcfg1 = tr32(NVRAM_CFG1);
12380
12381         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12382         case FLASH_5717VENDOR_ATMEL_EEPROM:
12383         case FLASH_5717VENDOR_MICRO_EEPROM:
12384                 tp->nvram_jedecnum = JEDEC_ATMEL;
12385                 tg3_flag_set(tp, NVRAM_BUFFERED);
12386                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12387
12388                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12389                 tw32(NVRAM_CFG1, nvcfg1);
12390                 return;
12391         case FLASH_5717VENDOR_ATMEL_MDB011D:
12392         case FLASH_5717VENDOR_ATMEL_ADB011B:
12393         case FLASH_5717VENDOR_ATMEL_ADB011D:
12394         case FLASH_5717VENDOR_ATMEL_MDB021D:
12395         case FLASH_5717VENDOR_ATMEL_ADB021B:
12396         case FLASH_5717VENDOR_ATMEL_ADB021D:
12397         case FLASH_5717VENDOR_ATMEL_45USPT:
12398                 tp->nvram_jedecnum = JEDEC_ATMEL;
12399                 tg3_flag_set(tp, NVRAM_BUFFERED);
12400                 tg3_flag_set(tp, FLASH);
12401
12402                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12403                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12404                         /* Detect size with tg3_get_nvram_size() */
12405                         break;
12406                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12407                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12408                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12409                         break;
12410                 default:
12411                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12412                         break;
12413                 }
12414                 break;
12415         case FLASH_5717VENDOR_ST_M_M25PE10:
12416         case FLASH_5717VENDOR_ST_A_M25PE10:
12417         case FLASH_5717VENDOR_ST_M_M45PE10:
12418         case FLASH_5717VENDOR_ST_A_M45PE10:
12419         case FLASH_5717VENDOR_ST_M_M25PE20:
12420         case FLASH_5717VENDOR_ST_A_M25PE20:
12421         case FLASH_5717VENDOR_ST_M_M45PE20:
12422         case FLASH_5717VENDOR_ST_A_M45PE20:
12423         case FLASH_5717VENDOR_ST_25USPT:
12424         case FLASH_5717VENDOR_ST_45USPT:
12425                 tp->nvram_jedecnum = JEDEC_ST;
12426                 tg3_flag_set(tp, NVRAM_BUFFERED);
12427                 tg3_flag_set(tp, FLASH);
12428
12429                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12430                 case FLASH_5717VENDOR_ST_M_M25PE20:
12431                 case FLASH_5717VENDOR_ST_M_M45PE20:
12432                         /* Detect size with tg3_get_nvram_size() */
12433                         break;
12434                 case FLASH_5717VENDOR_ST_A_M25PE20:
12435                 case FLASH_5717VENDOR_ST_A_M45PE20:
12436                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12437                         break;
12438                 default:
12439                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12440                         break;
12441                 }
12442                 break;
12443         default:
12444                 tg3_flag_set(tp, NO_NVRAM);
12445                 return;
12446         }
12447
12448         tg3_nvram_get_pagesize(tp, nvcfg1);
12449         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12450                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12451 }
12452
12453 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12454 {
12455         u32 nvcfg1, nvmpinstrp;
12456
12457         nvcfg1 = tr32(NVRAM_CFG1);
12458         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12459
12460         switch (nvmpinstrp) {
12461         case FLASH_5720_EEPROM_HD:
12462         case FLASH_5720_EEPROM_LD:
12463                 tp->nvram_jedecnum = JEDEC_ATMEL;
12464                 tg3_flag_set(tp, NVRAM_BUFFERED);
12465
12466                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12467                 tw32(NVRAM_CFG1, nvcfg1);
12468                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12469                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12470                 else
12471                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12472                 return;
12473         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12474         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12475         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12476         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12477         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12478         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12479         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12480         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12481         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12482         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12483         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12484         case FLASH_5720VENDOR_ATMEL_45USPT:
12485                 tp->nvram_jedecnum = JEDEC_ATMEL;
12486                 tg3_flag_set(tp, NVRAM_BUFFERED);
12487                 tg3_flag_set(tp, FLASH);
12488
12489                 switch (nvmpinstrp) {
12490                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12491                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12492                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12493                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12494                         break;
12495                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12496                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12497                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12498                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12499                         break;
12500                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12501                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12502                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12503                         break;
12504                 default:
12505                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12506                         break;
12507                 }
12508                 break;
12509         case FLASH_5720VENDOR_M_ST_M25PE10:
12510         case FLASH_5720VENDOR_M_ST_M45PE10:
12511         case FLASH_5720VENDOR_A_ST_M25PE10:
12512         case FLASH_5720VENDOR_A_ST_M45PE10:
12513         case FLASH_5720VENDOR_M_ST_M25PE20:
12514         case FLASH_5720VENDOR_M_ST_M45PE20:
12515         case FLASH_5720VENDOR_A_ST_M25PE20:
12516         case FLASH_5720VENDOR_A_ST_M45PE20:
12517         case FLASH_5720VENDOR_M_ST_M25PE40:
12518         case FLASH_5720VENDOR_M_ST_M45PE40:
12519         case FLASH_5720VENDOR_A_ST_M25PE40:
12520         case FLASH_5720VENDOR_A_ST_M45PE40:
12521         case FLASH_5720VENDOR_M_ST_M25PE80:
12522         case FLASH_5720VENDOR_M_ST_M45PE80:
12523         case FLASH_5720VENDOR_A_ST_M25PE80:
12524         case FLASH_5720VENDOR_A_ST_M45PE80:
12525         case FLASH_5720VENDOR_ST_25USPT:
12526         case FLASH_5720VENDOR_ST_45USPT:
12527                 tp->nvram_jedecnum = JEDEC_ST;
12528                 tg3_flag_set(tp, NVRAM_BUFFERED);
12529                 tg3_flag_set(tp, FLASH);
12530
12531                 switch (nvmpinstrp) {
12532                 case FLASH_5720VENDOR_M_ST_M25PE20:
12533                 case FLASH_5720VENDOR_M_ST_M45PE20:
12534                 case FLASH_5720VENDOR_A_ST_M25PE20:
12535                 case FLASH_5720VENDOR_A_ST_M45PE20:
12536                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12537                         break;
12538                 case FLASH_5720VENDOR_M_ST_M25PE40:
12539                 case FLASH_5720VENDOR_M_ST_M45PE40:
12540                 case FLASH_5720VENDOR_A_ST_M25PE40:
12541                 case FLASH_5720VENDOR_A_ST_M45PE40:
12542                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12543                         break;
12544                 case FLASH_5720VENDOR_M_ST_M25PE80:
12545                 case FLASH_5720VENDOR_M_ST_M45PE80:
12546                 case FLASH_5720VENDOR_A_ST_M25PE80:
12547                 case FLASH_5720VENDOR_A_ST_M45PE80:
12548                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12549                         break;
12550                 default:
12551                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12552                         break;
12553                 }
12554                 break;
12555         default:
12556                 tg3_flag_set(tp, NO_NVRAM);
12557                 return;
12558         }
12559
12560         tg3_nvram_get_pagesize(tp, nvcfg1);
12561         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12562                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12563 }
12564
12565 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12566 static void __devinit tg3_nvram_init(struct tg3 *tp)
12567 {
12568         tw32_f(GRC_EEPROM_ADDR,
12569              (EEPROM_ADDR_FSM_RESET |
12570               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12571                EEPROM_ADDR_CLKPERD_SHIFT)));
12572
12573         msleep(1);
12574
12575         /* Enable seeprom accesses. */
12576         tw32_f(GRC_LOCAL_CTRL,
12577              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12578         udelay(100);
12579
12580         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12581             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12582                 tg3_flag_set(tp, NVRAM);
12583
12584                 if (tg3_nvram_lock(tp)) {
12585                         netdev_warn(tp->dev,
12586                                     "Cannot get nvram lock, %s failed\n",
12587                                     __func__);
12588                         return;
12589                 }
12590                 tg3_enable_nvram_access(tp);
12591
12592                 tp->nvram_size = 0;
12593
12594                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12595                         tg3_get_5752_nvram_info(tp);
12596                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12597                         tg3_get_5755_nvram_info(tp);
12598                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12599                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12600                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12601                         tg3_get_5787_nvram_info(tp);
12602                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12603                         tg3_get_5761_nvram_info(tp);
12604                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12605                         tg3_get_5906_nvram_info(tp);
12606                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12607                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12608                         tg3_get_57780_nvram_info(tp);
12609                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12610                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12611                         tg3_get_5717_nvram_info(tp);
12612                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12613                         tg3_get_5720_nvram_info(tp);
12614                 else
12615                         tg3_get_nvram_info(tp);
12616
12617                 if (tp->nvram_size == 0)
12618                         tg3_get_nvram_size(tp);
12619
12620                 tg3_disable_nvram_access(tp);
12621                 tg3_nvram_unlock(tp);
12622
12623         } else {
12624                 tg3_flag_clear(tp, NVRAM);
12625                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12626
12627                 tg3_get_eeprom_size(tp);
12628         }
12629 }
12630
12631 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12632                                     u32 offset, u32 len, u8 *buf)
12633 {
12634         int i, j, rc = 0;
12635         u32 val;
12636
12637         for (i = 0; i < len; i += 4) {
12638                 u32 addr;
12639                 __be32 data;
12640
12641                 addr = offset + i;
12642
12643                 memcpy(&data, buf + i, 4);
12644
12645                 /*
12646                  * The SEEPROM interface expects the data to always be opposite
12647                  * the native endian format.  We accomplish this by reversing
12648                  * all the operations that would have been performed on the
12649                  * data from a call to tg3_nvram_read_be32().
12650                  */
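                /* Worked example: if buf holds the bytes 12 34 56 78, then
                 * data is big-endian 0x12345678 and the value written below
                 * is swab32(0x12345678) = 0x78563412 on any host endianness.
                 */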
12651                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12652
12653                 val = tr32(GRC_EEPROM_ADDR);
12654                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12655
12656                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12657                         EEPROM_ADDR_READ);
12658                 tw32(GRC_EEPROM_ADDR, val |
12659                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12660                         (addr & EEPROM_ADDR_ADDR_MASK) |
12661                         EEPROM_ADDR_START |
12662                         EEPROM_ADDR_WRITE);
12663
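                /* Poll for up to ~1 s (1000 x 1 ms) for the write cycle
                 * to complete.
                 */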
12664                 for (j = 0; j < 1000; j++) {
12665                         val = tr32(GRC_EEPROM_ADDR);
12666
12667                         if (val & EEPROM_ADDR_COMPLETE)
12668                                 break;
12669                         msleep(1);
12670                 }
12671                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12672                         rc = -EBUSY;
12673                         break;
12674                 }
12675         }
12676
12677         return rc;
12678 }
12679
12680 /* offset and length are dword aligned */
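/* Sketch of the flow below: unbuffered flash is written one page at a
 * time.  Read the page into a scratch buffer, merge in the caller's data,
 * erase the page, then stream the merged page back with NVRAM_CMD_FIRST
 * and NVRAM_CMD_LAST framing.
 */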
12681 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12682                 u8 *buf)
12683 {
12684         int ret = 0;
12685         u32 pagesize = tp->nvram_pagesize;
12686         u32 pagemask = pagesize - 1;
12687         u32 nvram_cmd;
12688         u8 *tmp;
12689
12690         tmp = kmalloc(pagesize, GFP_KERNEL);
12691         if (tmp == NULL)
12692                 return -ENOMEM;
12693
12694         while (len) {
12695                 int j;
12696                 u32 phy_addr, page_off, size;
12697
12698                 phy_addr = offset & ~pagemask;
12699
12700                 for (j = 0; j < pagesize; j += 4) {
12701                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12702                                                   (__be32 *) (tmp + j));
12703                         if (ret)
12704                                 break;
12705                 }
12706                 if (ret)
12707                         break;
12708
12709                 page_off = offset & pagemask;
12710                 size = pagesize;
12711                 if (len < size)
12712                         size = len;
12713
12714                 len -= size;
12715
12716                 memcpy(tmp + page_off, buf, size);
12717
12718                 offset = offset + (pagesize - page_off);
12719
12720                 tg3_enable_nvram_access(tp);
12721
12722                 /*
12723                  * Before we can erase the flash page, we need
12724                  * to issue a special "write enable" command.
12725                  */
12726                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12727
12728                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12729                         break;
12730
12731                 /* Erase the target page */
12732                 tw32(NVRAM_ADDR, phy_addr);
12733
12734                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12735                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12736
12737                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12738                         break;
12739
12740                 /* Issue another write enable to start the write. */
12741                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12742
12743                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12744                         break;
12745
12746                 for (j = 0; j < pagesize; j += 4) {
12747                         __be32 data;
12748
12749                         data = *((__be32 *) (tmp + j));
12750
12751                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12752
12753                         tw32(NVRAM_ADDR, phy_addr + j);
12754
12755                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12756                                 NVRAM_CMD_WR;
12757
12758                         if (j == 0)
12759                                 nvram_cmd |= NVRAM_CMD_FIRST;
12760                         else if (j == (pagesize - 4))
12761                                 nvram_cmd |= NVRAM_CMD_LAST;
12762
12763                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12764                                 break;
12765                 }
12766                 if (ret)
12767                         break;
12768         }
12769
12770         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12771         tg3_nvram_exec_cmd(tp, nvram_cmd);
12772
12773         kfree(tmp);
12774
12775         return ret;
12776 }
12777
12778 /* offset and length are dword aligned */
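/* Sketch of the flow below: buffered (self-erasing) parts accept word
 * writes directly, so each page is simply framed with NVRAM_CMD_FIRST and
 * NVRAM_CMD_LAST; some ST parts additionally need a write-enable command
 * at the start of each page.
 */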
12779 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12780                 u8 *buf)
12781 {
12782         int i, ret = 0;
12783
12784         for (i = 0; i < len; i += 4, offset += 4) {
12785                 u32 page_off, phy_addr, nvram_cmd;
12786                 __be32 data;
12787
12788                 memcpy(&data, buf + i, 4);
12789                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12790
12791                 page_off = offset % tp->nvram_pagesize;
12792
12793                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12794
12795                 tw32(NVRAM_ADDR, phy_addr);
12796
12797                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12798
12799                 if (page_off == 0 || i == 0)
12800                         nvram_cmd |= NVRAM_CMD_FIRST;
12801                 if (page_off == (tp->nvram_pagesize - 4))
12802                         nvram_cmd |= NVRAM_CMD_LAST;
12803
12804                 if (i == (len - 4))
12805                         nvram_cmd |= NVRAM_CMD_LAST;
12806
12807                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12808                     !tg3_flag(tp, 5755_PLUS) &&
12809                     (tp->nvram_jedecnum == JEDEC_ST) &&
12810                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12811
12812                         if ((ret = tg3_nvram_exec_cmd(tp,
12813                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12814                                 NVRAM_CMD_DONE)))
12815
12816                                 break;
12817                 }
12818                 if (!tg3_flag(tp, FLASH)) {
12819                         /* We always do complete word writes to eeprom. */
12820                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12821                 }
12822
12823                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12824                         break;
12825         }
12826         return ret;
12827 }
12828
12829 /* offset and length are dword aligned */
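/* Top-level write path: momentarily drop the write-protect GPIO when
 * needed, then dispatch to the legacy-EEPROM, buffered, or unbuffered
 * writer above.
 */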
12830 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12831 {
12832         int ret;
12833
12834         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12835                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12836                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12837                 udelay(40);
12838         }
12839
12840         if (!tg3_flag(tp, NVRAM)) {
12841                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12842         } else {
12843                 u32 grc_mode;
12844
12845                 ret = tg3_nvram_lock(tp);
12846                 if (ret)
12847                         return ret;
12848
12849                 tg3_enable_nvram_access(tp);
12850                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12851                         tw32(NVRAM_WRITE1, 0x406);
12852
12853                 grc_mode = tr32(GRC_MODE);
12854                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12855
12856                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12857                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12858                                 buf);
12859                 } else {
12860                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12861                                 buf);
12862                 }
12863
12864                 grc_mode = tr32(GRC_MODE);
12865                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12866
12867                 tg3_disable_nvram_access(tp);
12868                 tg3_nvram_unlock(tp);
12869         }
12870
12871         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12872                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12873                 udelay(40);
12874         }
12875
12876         return ret;
12877 }
12878
12879 struct subsys_tbl_ent {
12880         u16 subsys_vendor, subsys_devid;
12881         u32 phy_id;
12882 };
12883
12884 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12885         /* Broadcom boards. */
12886         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12887           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12888         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12889           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12890         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12891           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12892         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12893           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12894         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12895           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12896         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12897           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12898         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12899           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12900         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12901           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12902         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12903           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12904         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12905           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12906         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12907           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12908
12909         /* 3com boards. */
12910         { TG3PCI_SUBVENDOR_ID_3COM,
12911           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12912         { TG3PCI_SUBVENDOR_ID_3COM,
12913           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12914         { TG3PCI_SUBVENDOR_ID_3COM,
12915           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12916         { TG3PCI_SUBVENDOR_ID_3COM,
12917           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12918         { TG3PCI_SUBVENDOR_ID_3COM,
12919           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12920
12921         /* DELL boards. */
12922         { TG3PCI_SUBVENDOR_ID_DELL,
12923           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12924         { TG3PCI_SUBVENDOR_ID_DELL,
12925           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12926         { TG3PCI_SUBVENDOR_ID_DELL,
12927           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12928         { TG3PCI_SUBVENDOR_ID_DELL,
12929           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12930
12931         /* Compaq boards. */
12932         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12933           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12934         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12935           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12936         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12937           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12938         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12939           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12940         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12941           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12942
12943         /* IBM boards. */
12944         { TG3PCI_SUBVENDOR_ID_IBM,
12945           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12946 };
12947
12948 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12949 {
12950         int i;
12951
12952         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12953                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12954                      tp->pdev->subsystem_vendor) &&
12955                     (subsys_id_to_phy_id[i].subsys_devid ==
12956                      tp->pdev->subsystem_device))
12957                         return &subsys_id_to_phy_id[i];
12958         }
12959         return NULL;
12960 }
12961
12962 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12963 {
12964         u32 val;
12965
12966         tp->phy_id = TG3_PHY_ID_INVALID;
12967         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12968
12969         /* Assume an onboard, WOL-capable device by default. */
12970         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12971         tg3_flag_set(tp, WOL_CAP);
12972
12973         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12974                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12975                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12976                         tg3_flag_set(tp, IS_NIC);
12977                 }
12978                 val = tr32(VCPU_CFGSHDW);
12979                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12980                         tg3_flag_set(tp, ASPM_WORKAROUND);
12981                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12982                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12983                         tg3_flag_set(tp, WOL_ENABLE);
12984                         device_set_wakeup_enable(&tp->pdev->dev, true);
12985                 }
12986                 goto done;
12987         }
12988
12989         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12990         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12991                 u32 nic_cfg, led_cfg;
12992                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12993                 int eeprom_phy_serdes = 0;
12994
12995                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12996                 tp->nic_sram_data_cfg = nic_cfg;
12997
12998                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12999                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13000                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13001                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13002                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13003                     (ver > 0) && (ver < 0x100))
13004                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13005
13006                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13007                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13008
13009                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13010                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13011                         eeprom_phy_serdes = 1;
13012
13013                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13014                 if (nic_phy_id != 0) {
13015                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13016                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13017
13018                         eeprom_phy_id  = (id1 >> 16) << 10;
13019                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13020                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13021                 } else
13022                         eeprom_phy_id = 0;
13023
13024                 tp->phy_id = eeprom_phy_id;
13025                 if (eeprom_phy_serdes) {
13026                         if (!tg3_flag(tp, 5705_PLUS))
13027                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13028                         else
13029                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13030                 }
13031
13032                 if (tg3_flag(tp, 5750_PLUS))
13033                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13034                                     SHASTA_EXT_LED_MODE_MASK);
13035                 else
13036                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13037
13038                 switch (led_cfg) {
13039                 default:
13040                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13041                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13042                         break;
13043
13044                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13045                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13046                         break;
13047
13048                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13049                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13050
13051                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
13052                          * as happens with some older 5700/5701 bootcode.
13053                          */
13054                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13055                             ASIC_REV_5700 ||
13056                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13057                             ASIC_REV_5701)
13058                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13059
13060                         break;
13061
13062                 case SHASTA_EXT_LED_SHARED:
13063                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13064                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13065                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13066                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13067                                                  LED_CTRL_MODE_PHY_2);
13068                         break;
13069
13070                 case SHASTA_EXT_LED_MAC:
13071                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13072                         break;
13073
13074                 case SHASTA_EXT_LED_COMBO:
13075                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13076                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13077                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13078                                                  LED_CTRL_MODE_PHY_2);
13079                         break;
13080
13081                 }
13082
13083                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13084                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13085                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13086                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13087
13088                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13089                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13090
13091                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13092                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13093                         if ((tp->pdev->subsystem_vendor ==
13094                              PCI_VENDOR_ID_ARIMA) &&
13095                             (tp->pdev->subsystem_device == 0x205a ||
13096                              tp->pdev->subsystem_device == 0x2063))
13097                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13098                 } else {
13099                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13100                         tg3_flag_set(tp, IS_NIC);
13101                 }
13102
13103                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13104                         tg3_flag_set(tp, ENABLE_ASF);
13105                         if (tg3_flag(tp, 5750_PLUS))
13106                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13107                 }
13108
13109                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13110                     tg3_flag(tp, 5750_PLUS))
13111                         tg3_flag_set(tp, ENABLE_APE);
13112
13113                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13114                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13115                         tg3_flag_clear(tp, WOL_CAP);
13116
13117                 if (tg3_flag(tp, WOL_CAP) &&
13118                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13119                         tg3_flag_set(tp, WOL_ENABLE);
13120                         device_set_wakeup_enable(&tp->pdev->dev, true);
13121                 }
13122
13123                 if (cfg2 & (1 << 17))
13124                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13125
13126                 /* SerDes signal pre-emphasis in register 0x590 is set
13127                  * by the bootcode if bit 18 is set. */
13128                 if (cfg2 & (1 << 18))
13129                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13130
13131                 if ((tg3_flag(tp, 57765_PLUS) ||
13132                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13133                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13134                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13135                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13136
13137                 if (tg3_flag(tp, PCI_EXPRESS) &&
13138                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13139                     !tg3_flag(tp, 57765_PLUS)) {
13140                         u32 cfg3;
13141
13142                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13143                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13144                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13145                 }
13146
13147                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13148                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13149                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13150                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13151                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13152                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13153         }
13154 done:
13155         if (tg3_flag(tp, WOL_CAP))
13156                 device_set_wakeup_enable(&tp->pdev->dev,
13157                                          tg3_flag(tp, WOL_ENABLE));
13158         else
13159                 device_set_wakeup_capable(&tp->pdev->dev, false);
13160 }
13161
13162 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13163 {
13164         int i;
13165         u32 val;
13166
13167         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13168         tw32(OTP_CTRL, cmd);
13169
13170         /* Wait for up to 1 ms for command to execute. */
13171         for (i = 0; i < 100; i++) {
13172                 val = tr32(OTP_STATUS);
13173                 if (val & OTP_STATUS_CMD_DONE)
13174                         break;
13175                 udelay(10);
13176         }
13177
13178         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13179 }
13180
13181 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13182  * configuration is a 32-bit value that straddles the alignment boundary.
13183  * We do two 32-bit reads and then shift and merge the results.
13184  */
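/* Worked example of the merge below: thalf_otp = 0x11112222 and
 * bhalf_otp = 0x33334444 yield (0x2222 << 16) | 0x3333 = 0x22223333.
 */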
13185 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13186 {
13187         u32 bhalf_otp, thalf_otp;
13188
13189         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13190
13191         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13192                 return 0;
13193
13194         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13195
13196         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13197                 return 0;
13198
13199         thalf_otp = tr32(OTP_READ_DATA);
13200
13201         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13202
13203         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13204                 return 0;
13205
13206         bhalf_otp = tr32(OTP_READ_DATA);
13207
13208         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13209 }
13210
13211 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13212 {
13213         u32 adv = ADVERTISED_Autoneg |
13214                   ADVERTISED_Pause;
13215
13216         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13217                 adv |= ADVERTISED_1000baseT_Half |
13218                        ADVERTISED_1000baseT_Full;
13219
13220         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13221                 adv |= ADVERTISED_100baseT_Half |
13222                        ADVERTISED_100baseT_Full |
13223                        ADVERTISED_10baseT_Half |
13224                        ADVERTISED_10baseT_Full |
13225                        ADVERTISED_TP;
13226         else
13227                 adv |= ADVERTISED_FIBRE;
13228
13229         tp->link_config.advertising = adv;
13230         tp->link_config.speed = SPEED_INVALID;
13231         tp->link_config.duplex = DUPLEX_INVALID;
13232         tp->link_config.autoneg = AUTONEG_ENABLE;
13233         tp->link_config.active_speed = SPEED_INVALID;
13234         tp->link_config.active_duplex = DUPLEX_INVALID;
13235         tp->link_config.orig_speed = SPEED_INVALID;
13236         tp->link_config.orig_duplex = DUPLEX_INVALID;
13237         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13238 }
13239
13240 static int __devinit tg3_phy_probe(struct tg3 *tp)
13241 {
13242         u32 hw_phy_id_1, hw_phy_id_2;
13243         u32 hw_phy_id, hw_phy_id_masked;
13244         int err;
13245
13246         /* Flow control autonegotiation is the default behavior. */
13247         tg3_flag_set(tp, PAUSE_AUTONEG);
13248         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13249
13250         if (tg3_flag(tp, USE_PHYLIB))
13251                 return tg3_phy_init(tp);
13252
13253         /* Reading the PHY ID register can conflict with ASF
13254          * firmware access to the PHY hardware.
13255          */
13256         err = 0;
13257         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13258                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13259         } else {
13260                 /* Now read the physical PHY_ID from the chip and verify
13261                  * that it is sane.  If it doesn't look good, we fall back
13262                  * to the PHY_ID found in the eeprom area or, failing
13263                  * that, to the hard-coded subsystem-ID table.
13264                  */
13265                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13266                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13267
13268                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13269                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13270                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
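                /* Resulting layout: PHYSID1[15:0] lands in bits 25:10,
                 * PHYSID2[15:10] in bits 31:26, and PHYSID2[9:0]
                 * (model/revision) in bits 9:0.
                 */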
13271
13272                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13273         }
13274
13275         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13276                 tp->phy_id = hw_phy_id;
13277                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13278                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13279                 else
13280                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13281         } else {
13282                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13283                         /* Do nothing, phy ID already set up in
13284                          * tg3_get_eeprom_hw_cfg().
13285                          */
13286                 } else {
13287                         struct subsys_tbl_ent *p;
13288
13289                         /* No eeprom signature?  Try the hardcoded
13290                          * subsys device table.
13291                          */
13292                         p = tg3_lookup_by_subsys(tp);
13293                         if (!p)
13294                                 return -ENODEV;
13295
13296                         tp->phy_id = p->phy_id;
13297                         if (!tp->phy_id ||
13298                             tp->phy_id == TG3_PHY_ID_BCM8002)
13299                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13300                 }
13301         }
13302
13303         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13304             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13305              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13306              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13307               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13308              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13309               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13310                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13311
13312         tg3_phy_init_link_config(tp);
13313
13314         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13315             !tg3_flag(tp, ENABLE_APE) &&
13316             !tg3_flag(tp, ENABLE_ASF)) {
13317                 u32 bmsr, mask;
13318
13319                 tg3_readphy(tp, MII_BMSR, &bmsr);
13320                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13321                     (bmsr & BMSR_LSTATUS))
13322                         goto skip_phy_reset;
13323
13324                 err = tg3_phy_reset(tp);
13325                 if (err)
13326                         return err;
13327
13328                 tg3_phy_set_wirespeed(tp);
13329
13330                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13331                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13332                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13333                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13334                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13335                                             tp->link_config.flowctrl);
13336
13337                         tg3_writephy(tp, MII_BMCR,
13338                                      BMCR_ANENABLE | BMCR_ANRESTART);
13339                 }
13340         }
13341
13342 skip_phy_reset:
13343         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13344                 err = tg3_init_5401phy_dsp(tp);
13345                 if (err)
13346                         return err;
13347
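                /* The DSP setup is deliberately issued a second time; the
                 * return value of this second pass is what the function
                 * ultimately reports.
                 */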
13348                 err = tg3_init_5401phy_dsp(tp);
13349         }
13350
13351         return err;
13352 }
13353
13354 static void __devinit tg3_read_vpd(struct tg3 *tp)
13355 {
13356         u8 *vpd_data;
13357         unsigned int block_end, rosize, len;
13358         u32 vpdlen;
13359         int j, i = 0;
13360
13361         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13362         if (!vpd_data)
13363                 goto out_no_vpd;
13364
13365         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13366         if (i < 0)
13367                 goto out_not_found;
13368
13369         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13370         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13371         i += PCI_VPD_LRDT_TAG_SIZE;
13372
13373         if (block_end > vpdlen)
13374                 goto out_not_found;
13375
13376         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13377                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13378         if (j > 0) {
13379                 len = pci_vpd_info_field_size(&vpd_data[j]);
13380
13381                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
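                /* "1028" is Dell's PCI vendor ID (0x1028) in ASCII. */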
13382                 if (j + len > block_end || len != 4 ||
13383                     memcmp(&vpd_data[j], "1028", 4))
13384                         goto partno;
13385
13386                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13387                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13388                 if (j < 0)
13389                         goto partno;
13390
13391                 len = pci_vpd_info_field_size(&vpd_data[j]);
13392
13393                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13394                 if (j + len > block_end || len >= TG3_VER_SIZE - 4)
13395                         goto partno;
13396
13397                 memcpy(tp->fw_ver, &vpd_data[j], len);
13398                 strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
13399         }
13400
13401 partno:
13402         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13403                                       PCI_VPD_RO_KEYWORD_PARTNO);
13404         if (i < 0)
13405                 goto out_not_found;
13406
13407         len = pci_vpd_info_field_size(&vpd_data[i]);
13408
13409         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13410         if (len > TG3_BPN_SIZE ||
13411             (len + i) > vpdlen)
13412                 goto out_not_found;
13413
13414         memcpy(tp->board_part_number, &vpd_data[i], len);
13415
13416 out_not_found:
13417         kfree(vpd_data);
13418         if (tp->board_part_number[0])
13419                 return;
13420
13421 out_no_vpd:
13422         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13423                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13424                         strcpy(tp->board_part_number, "BCM5717");
13425                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13426                         strcpy(tp->board_part_number, "BCM5718");
13427                 else
13428                         goto nomatch;
13429         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13430                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13431                         strcpy(tp->board_part_number, "BCM57780");
13432                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13433                         strcpy(tp->board_part_number, "BCM57760");
13434                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13435                         strcpy(tp->board_part_number, "BCM57790");
13436                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13437                         strcpy(tp->board_part_number, "BCM57788");
13438                 else
13439                         goto nomatch;
13440         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13441                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13442                         strcpy(tp->board_part_number, "BCM57761");
13443                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13444                         strcpy(tp->board_part_number, "BCM57765");
13445                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13446                         strcpy(tp->board_part_number, "BCM57781");
13447                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13448                         strcpy(tp->board_part_number, "BCM57785");
13449                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13450                         strcpy(tp->board_part_number, "BCM57791");
13451                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13452                         strcpy(tp->board_part_number, "BCM57795");
13453                 else
13454                         goto nomatch;
13455         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13456                 strcpy(tp->board_part_number, "BCM95906");
13457         } else {
13458 nomatch:
13459                 strcpy(tp->board_part_number, "none");
13460         }
13461 }
13462
13463 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13464 {
13465         u32 val;
13466
13467         if (tg3_nvram_read(tp, offset, &val) ||
13468             (val & 0xfc000000) != 0x0c000000 ||
13469             tg3_nvram_read(tp, offset + 4, &val) ||
13470             val != 0)
13471                 return 0;
13472
13473         return 1;
13474 }
13475
13476 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13477 {
13478         u32 val, offset, start, ver_offset;
13479         int i, dst_off;
13480         bool newver = false;
13481
13482         if (tg3_nvram_read(tp, 0xc, &offset) ||
13483             tg3_nvram_read(tp, 0x4, &start))
13484                 return;
13485
13486         offset = tg3_nvram_logical_addr(tp, offset);
13487
13488         if (tg3_nvram_read(tp, offset, &val))
13489                 return;
13490
13491         if ((val & 0xfc000000) == 0x0c000000) {
13492                 if (tg3_nvram_read(tp, offset + 4, &val))
13493                         return;
13494
13495                 if (val == 0)
13496                         newver = true;
13497         }
13498
13499         dst_off = strlen(tp->fw_ver);
13500
13501         if (newver) {
13502                 if (TG3_VER_SIZE - dst_off < 16 ||
13503                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13504                         return;
13505
13506                 offset = offset + ver_offset - start;
13507                 for (i = 0; i < 16; i += 4) {
13508                         __be32 v;
13509                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13510                                 return;
13511
13512                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13513                 }
13514         } else {
13515                 u32 major, minor;
13516
13517                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13518                         return;
13519
13520                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13521                         TG3_NVM_BCVER_MAJSFT;
13522                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13523                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13524                          "v%d.%02d", major, minor);
13525         }
13526 }
13527
13528 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13529 {
13530         u32 val, major, minor;
13531
13532         /* Use native endian representation */
13533         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13534                 return;
13535
13536         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13537                 TG3_NVM_HWSB_CFG1_MAJSFT;
13538         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13539                 TG3_NVM_HWSB_CFG1_MINSFT;
13540
13541         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13542 }
13543
13544 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13545 {
13546         u32 offset, major, minor, build;
13547
13548         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13549
13550         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13551                 return;
13552
13553         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13554         case TG3_EEPROM_SB_REVISION_0:
13555                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13556                 break;
13557         case TG3_EEPROM_SB_REVISION_2:
13558                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13559                 break;
13560         case TG3_EEPROM_SB_REVISION_3:
13561                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13562                 break;
13563         case TG3_EEPROM_SB_REVISION_4:
13564                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13565                 break;
13566         case TG3_EEPROM_SB_REVISION_5:
13567                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13568                 break;
13569         case TG3_EEPROM_SB_REVISION_6:
13570                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13571                 break;
13572         default:
13573                 return;
13574         }
13575
13576         if (tg3_nvram_read(tp, offset, &val))
13577                 return;
13578
13579         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13580                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13581         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13582                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13583         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13584
13585         if (minor > 99 || build > 26)
13586                 return;
13587
13588         offset = strlen(tp->fw_ver);
13589         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13590                  " v%d.%02d", major, minor);
13591
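        /* Builds 1..26 are encoded as a trailing letter 'a'..'z'. */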
13592         if (build > 0) {
13593                 offset = strlen(tp->fw_ver);
13594                 if (offset < TG3_VER_SIZE - 1)
13595                         tp->fw_ver[offset] = 'a' + build - 1;
13596         }
13597 }
13598
13599 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13600 {
13601         u32 val, offset, start;
13602         int i, vlen;
13603
13604         for (offset = TG3_NVM_DIR_START;
13605              offset < TG3_NVM_DIR_END;
13606              offset += TG3_NVM_DIRENT_SIZE) {
13607                 if (tg3_nvram_read(tp, offset, &val))
13608                         return;
13609
13610                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13611                         break;
13612         }
13613
13614         if (offset == TG3_NVM_DIR_END)
13615                 return;
13616
13617         if (!tg3_flag(tp, 5705_PLUS))
13618                 start = 0x08000000;
13619         else if (tg3_nvram_read(tp, offset - 4, &start))
13620                 return;
13621
13622         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13623             !tg3_fw_img_is_valid(tp, offset) ||
13624             tg3_nvram_read(tp, offset + 8, &val))
13625                 return;
13626
13627         offset += val - start;
13628
13629         vlen = strlen(tp->fw_ver);
13630
13631         tp->fw_ver[vlen++] = ',';
13632         tp->fw_ver[vlen++] = ' ';
13633
13634         for (i = 0; i < 4; i++) {
13635                 __be32 v;
13636                 if (tg3_nvram_read_be32(tp, offset, &v))
13637                         return;
13638
13639                 offset += sizeof(v);
13640
13641                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13642                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13643                         break;
13644                 }
13645
13646                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13647                 vlen += sizeof(v);
13648         }
13649 }
13650
13651 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13652 {
13653         int vlen;
13654         u32 apedata;
13655         char *fwtype;
13656
13657         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13658                 return;
13659
13660         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13661         if (apedata != APE_SEG_SIG_MAGIC)
13662                 return;
13663
13664         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13665         if (!(apedata & APE_FW_STATUS_READY))
13666                 return;
13667
13668         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13669
13670         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13671                 tg3_flag_set(tp, APE_HAS_NCSI);
13672                 fwtype = "NCSI";
13673         } else {
13674                 fwtype = "DASH";
13675         }
13676
13677         vlen = strlen(tp->fw_ver);
13678
13679         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13680                  fwtype,
13681                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13682                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13683                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13684                  (apedata & APE_FW_VERSION_BLDMSK));
13685 }
13686
13687 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13688 {
13689         u32 val;
13690         bool vpd_vers = false;
13691
13692         if (tp->fw_ver[0] != 0)
13693                 vpd_vers = true;
13694
13695         if (tg3_flag(tp, NO_NVRAM)) {
13696                 strcat(tp->fw_ver, "sb");
13697                 return;
13698         }
13699
13700         if (tg3_nvram_read(tp, 0, &val))
13701                 return;
13702
13703         if (val == TG3_EEPROM_MAGIC)
13704                 tg3_read_bc_ver(tp);
13705         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13706                 tg3_read_sb_ver(tp, val);
13707         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13708                 tg3_read_hwsb_ver(tp);
13709         else
13710                 return;
13711
13712         if (vpd_vers)
13713                 goto done;
13714
13715         if (tg3_flag(tp, ENABLE_APE)) {
13716                 if (tg3_flag(tp, ENABLE_ASF))
13717                         tg3_read_dash_ver(tp);
13718         } else if (tg3_flag(tp, ENABLE_ASF)) {
13719                 tg3_read_mgmtfw_ver(tp);
13720         }
13721
13722 done:
13723         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13724 }
13725
13726 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13727
13728 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13729 {
13730         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13731                 return TG3_RX_RET_MAX_SIZE_5717;
13732         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13733                 return TG3_RX_RET_MAX_SIZE_5700;
13734         else
13735                 return TG3_RX_RET_MAX_SIZE_5705;
13736 }
13737
13738 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13739         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13740         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13741         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13742         { },
13743 };
13744
13745 static int __devinit tg3_get_invariants(struct tg3 *tp)
13746 {
13747         u32 misc_ctrl_reg;
13748         u32 pci_state_reg, grc_misc_cfg;
13749         u32 val;
13750         u16 pci_cmd;
13751         int err;
13752
13753         /* Force memory write invalidate off.  If we leave it on,
13754          * then on 5700_BX chips we have to enable a workaround.
13755          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13756          * to match the cacheline size.  The Broadcom driver has this
13757          * workaround but turns MWI off all the time, so it never uses
13758          * it.  This seems to suggest that the workaround is insufficient.
13759          */
13760         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13761         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13762         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13763
13764         /* Important! -- Make sure register accesses are byteswapped
13765          * correctly.  Also, for those chips that require it, make
13766          * sure that indirect register accesses are enabled before
13767          * the first operation.
13768          */
13769         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13770                               &misc_ctrl_reg);
13771         tp->misc_host_ctrl |= (misc_ctrl_reg &
13772                                MISC_HOST_CTRL_CHIPREV);
13773         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13774                                tp->misc_host_ctrl);
13775
13776         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13777                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13778         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13779                 u32 prod_id_asic_rev;
13780
13781                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13782                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13783                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13784                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13785                         pci_read_config_dword(tp->pdev,
13786                                               TG3PCI_GEN2_PRODID_ASICREV,
13787                                               &prod_id_asic_rev);
13788                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13789                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13790                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13791                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13792                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13793                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13794                         pci_read_config_dword(tp->pdev,
13795                                               TG3PCI_GEN15_PRODID_ASICREV,
13796                                               &prod_id_asic_rev);
13797                 else
13798                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13799                                               &prod_id_asic_rev);
13800
13801                 tp->pci_chip_rev_id = prod_id_asic_rev;
13802         }
13803
13804         /* Wrong chip ID in 5752 A0. This code can be removed later
13805          * as A0 is not in production.
13806          */
13807         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13808                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13809
13810         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13811          * we need to disable memory and use config cycles
13812          * only to access all registers. The 5702/03 chips
13813          * can mistakenly decode the special cycles from the
13814          * ICH chipsets as memory write cycles, causing corruption
13815          * of register and memory space. Only certain ICH bridges
13816          * will drive special cycles with non-zero data during the
13817          * address phase which can fall within the 5703's address
13818          * range. This is not an ICH bug as the PCI spec allows
13819          * non-zero address during special cycles. However, only
13820          * these ICH bridges are known to drive non-zero addresses
13821          * during special cycles.
13822          *
13823          * Since special cycles do not cross PCI bridges, we only
13824          * enable this workaround if the 5703 is on the secondary
13825          * bus of these ICH bridges.
13826          */
13827         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13828             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13829                 static struct tg3_dev_id {
13830                         u32     vendor;
13831                         u32     device;
13832                         u32     rev;
13833                 } ich_chipsets[] = {
13834                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13835                           PCI_ANY_ID },
13836                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13837                           PCI_ANY_ID },
13838                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13839                           0xa },
13840                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13841                           PCI_ANY_ID },
13842                         { },
13843                 };
13844                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13845                 struct pci_dev *bridge = NULL;
13846
13847                 while (pci_id->vendor != 0) {
13848                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13849                                                 bridge);
13850                         if (!bridge) {
13851                                 pci_id++;
13852                                 continue;
13853                         }
13854                         if (pci_id->rev != PCI_ANY_ID) {
13855                                 if (bridge->revision > pci_id->rev)
13856                                         continue;
13857                         }
13858                         if (bridge->subordinate &&
13859                             (bridge->subordinate->number ==
13860                              tp->pdev->bus->number)) {
13861                                 tg3_flag_set(tp, ICH_WORKAROUND);
13862                                 pci_dev_put(bridge);
13863                                 break;
13864                         }
13865                 }
13866         }
13867
13868         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13869                 static struct tg3_dev_id {
13870                         u32     vendor;
13871                         u32     device;
13872                 } bridge_chipsets[] = {
13873                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13874                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13875                         { },
13876                 };
13877                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13878                 struct pci_dev *bridge = NULL;
13879
13880                 while (pci_id->vendor != 0) {
13881                         bridge = pci_get_device(pci_id->vendor,
13882                                                 pci_id->device,
13883                                                 bridge);
13884                         if (!bridge) {
13885                                 pci_id++;
13886                                 continue;
13887                         }
13888                         if (bridge->subordinate &&
13889                             (bridge->subordinate->number <=
13890                              tp->pdev->bus->number) &&
13891                             (bridge->subordinate->subordinate >=
13892                              tp->pdev->bus->number)) {
13893                                 tg3_flag_set(tp, 5701_DMA_BUG);
13894                                 pci_dev_put(bridge);
13895                                 break;
13896                         }
13897                 }
13898         }
13899
13900         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13901          * DMA addresses > 40-bit. This bridge may have other additional
13902          * 57xx devices behind it in some 4-port NIC designs for example.
13903          * Any tg3 device found behind the bridge will also need the 40-bit
13904          * DMA workaround.
13905          */
13906         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13907             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13908                 tg3_flag_set(tp, 5780_CLASS);
13909                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13910                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13911         } else {
13912                 struct pci_dev *bridge = NULL;
13913
13914                 do {
13915                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13916                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13917                                                 bridge);
13918                         if (bridge && bridge->subordinate &&
13919                             (bridge->subordinate->number <=
13920                              tp->pdev->bus->number) &&
13921                             (bridge->subordinate->subordinate >=
13922                              tp->pdev->bus->number)) {
13923                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13924                                 pci_dev_put(bridge);
13925                                 break;
13926                         }
13927                 } while (bridge);
13928         }
13929
13930         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13931             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13932                 tp->pdev_peer = tg3_find_peer(tp);
13933
13934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13935             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13936             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13937                 tg3_flag_set(tp, 5717_PLUS);
13938
13939         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13940             tg3_flag(tp, 5717_PLUS))
13941                 tg3_flag_set(tp, 57765_PLUS);
13942
13943         /* Intentionally exclude ASIC_REV_5906 */
13944         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13945             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13946             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13947             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13948             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13950             tg3_flag(tp, 57765_PLUS))
13951                 tg3_flag_set(tp, 5755_PLUS);
13952
13953         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13954             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13955             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13956             tg3_flag(tp, 5755_PLUS) ||
13957             tg3_flag(tp, 5780_CLASS))
13958                 tg3_flag_set(tp, 5750_PLUS);
13959
13960         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13961             tg3_flag(tp, 5750_PLUS))
13962                 tg3_flag_set(tp, 5705_PLUS);
13963
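/* At this point the generation flags nest, each newer family implying
 * the older ones:
 *
 *	5717_PLUS => 57765_PLUS => 5755_PLUS => 5750_PLUS => 5705_PLUS
 *
 * (5780_CLASS also implies 5750_PLUS.)  The feature tests below rely
 * on this nesting, so these flags must be settled first.
 */
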
13964         /* Determine TSO capabilities */
13965         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13966                 ; /* Do nothing. HW bug. */
13967         else if (tg3_flag(tp, 57765_PLUS))
13968                 tg3_flag_set(tp, HW_TSO_3);
13969         else if (tg3_flag(tp, 5755_PLUS) ||
13970                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13971                 tg3_flag_set(tp, HW_TSO_2);
13972         else if (tg3_flag(tp, 5750_PLUS)) {
13973                 tg3_flag_set(tp, HW_TSO_1);
13974                 tg3_flag_set(tp, TSO_BUG);
13975                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13976                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13977                         tg3_flag_clear(tp, TSO_BUG);
13978         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13979                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13980                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13981                 tg3_flag_set(tp, TSO_BUG);
13982                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13983                         tp->fw_needed = FIRMWARE_TG3TSO5;
13984                 else
13985                         tp->fw_needed = FIRMWARE_TG3TSO;
13986         }
13987
13988         /* Selectively allow TSO based on operating conditions */
13989         if (tg3_flag(tp, HW_TSO_1) ||
13990             tg3_flag(tp, HW_TSO_2) ||
13991             tg3_flag(tp, HW_TSO_3) ||
13992             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13993                 tg3_flag_set(tp, TSO_CAPABLE);
13994         else {
13995                 tg3_flag_clear(tp, TSO_CAPABLE);
13996                 tg3_flag_clear(tp, TSO_BUG);
13997                 tp->fw_needed = NULL;
13998         }
13999
14000         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14001                 tp->fw_needed = FIRMWARE_TG3;
14002
14003         tp->irq_max = 1;
14004
14005         if (tg3_flag(tp, 5750_PLUS)) {
14006                 tg3_flag_set(tp, SUPPORT_MSI);
14007                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14008                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14009                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14010                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14011                      tp->pdev_peer == tp->pdev))
14012                         tg3_flag_clear(tp, SUPPORT_MSI);
14013
14014                 if (tg3_flag(tp, 5755_PLUS) ||
14015                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14016                         tg3_flag_set(tp, 1SHOT_MSI);
14017                 }
14018
14019                 if (tg3_flag(tp, 57765_PLUS)) {
14020                         tg3_flag_set(tp, SUPPORT_MSIX);
14021                         tp->irq_max = TG3_IRQ_MAX_VECS;
14022                 }
14023         }
14024
14025         if (tg3_flag(tp, 5755_PLUS))
14026                 tg3_flag_set(tp, SHORT_DMA_BUG);
14027
14028         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14029                 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14030
14031         if (tg3_flag(tp, 5717_PLUS))
14032                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14033
14034         if (tg3_flag(tp, 57765_PLUS) &&
14035             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14036                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14037
14038         if (!tg3_flag(tp, 5705_PLUS) ||
14039             tg3_flag(tp, 5780_CLASS) ||
14040             tg3_flag(tp, USE_JUMBO_BDFLAG))
14041                 tg3_flag_set(tp, JUMBO_CAPABLE);
14042
14043         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14044                               &pci_state_reg);
14045
14046         if (pci_is_pcie(tp->pdev)) {
14047                 u16 lnkctl;
14048
14049                 tg3_flag_set(tp, PCI_EXPRESS);
14050
14051                 tp->pcie_readrq = 4096;
14052                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14053                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14054                         tp->pcie_readrq = 2048;
14055
14056                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14057
14058                 pci_read_config_word(tp->pdev,
14059                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14060                                      &lnkctl);
14061                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14062                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14063                             ASIC_REV_5906) {
14064                                 tg3_flag_clear(tp, HW_TSO_2);
14065                                 tg3_flag_clear(tp, TSO_CAPABLE);
14066                         }
14067                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14068                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14069                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14070                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14071                                 tg3_flag_set(tp, CLKREQ_BUG);
14072                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14073                         tg3_flag_set(tp, L1PLLPD_EN);
14074                 }
14075         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14076                 /* BCM5785 devices are effectively PCIe devices, and should
14077                  * follow PCIe codepaths, but do not have a PCIe capabilities
14078                  * section.
14079                  */
14080                 tg3_flag_set(tp, PCI_EXPRESS);
14081         } else if (!tg3_flag(tp, 5705_PLUS) ||
14082                    tg3_flag(tp, 5780_CLASS)) {
14083                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14084                 if (!tp->pcix_cap) {
14085                         dev_err(&tp->pdev->dev,
14086                                 "Cannot find PCI-X capability, aborting\n");
14087                         return -EIO;
14088                 }
14089
14090                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14091                         tg3_flag_set(tp, PCIX_MODE);
14092         }
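
/* By construction, at most one of PCI_EXPRESS and PCIX_MODE is set
 * here; a device with neither is in conventional PCI mode.  Several
 * workarounds below key off this distinction.
 */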
14093
14094         /* If we have an AMD 762 or VIA K8T800 chipset, write
14095          * reordering to the mailbox registers done by the host
14096          * controller can cause major trouble.  We read back from
14097          * every mailbox register write to force the writes to be
14098          * posted to the chip in order.
14099          */
14100         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14101             !tg3_flag(tp, PCI_EXPRESS))
14102                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14103
14104         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14105                              &tp->pci_cacheline_sz);
14106         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14107                              &tp->pci_lat_timer);
14108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14109             tp->pci_lat_timer < 64) {
14110                 tp->pci_lat_timer = 64;
14111                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14112                                       tp->pci_lat_timer);
14113         }
14114
14115         /* Important! -- It is critical that the PCI-X hw workaround
14116          * situation is decided before the first MMIO register access.
14117          */
14118         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14119                 /* 5700 BX chips need to have their TX producer index
14120                  * mailboxes written twice to workaround a bug.
14121                  */
14122                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14123
14124                 /* If we are in PCI-X mode, enable register write workaround.
14125                  *
14126                  * The workaround is to use indirect register accesses
14127                  * for all chip writes not to mailbox registers.
14128                  */
14129                 if (tg3_flag(tp, PCIX_MODE)) {
14130                         u32 pm_reg;
14131
14132                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14133
14134                         /* The chip can have its power management PCI config
14135                          * space registers clobbered due to this bug.
14136                          * So explicitly force the chip into D0 here.
14137                          */
14138                         pci_read_config_dword(tp->pdev,
14139                                               tp->pm_cap + PCI_PM_CTRL,
14140                                               &pm_reg);
14141                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14142                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14143                         pci_write_config_dword(tp->pdev,
14144                                                tp->pm_cap + PCI_PM_CTRL,
14145                                                pm_reg);
14146
14147                         /* Also, force SERR#/PERR# in PCI command. */
14148                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14149                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14150                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14151                 }
14152         }
14153
14154         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14155                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14156         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14157                 tg3_flag_set(tp, PCI_32BIT);
14158
14159         /* Chip-specific fixup from Broadcom driver */
14160         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14161             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14162                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14163                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14164         }
14165
14166         /* Default fast path register access methods */
14167         tp->read32 = tg3_read32;
14168         tp->write32 = tg3_write32;
14169         tp->read32_mbox = tg3_read32;
14170         tp->write32_mbox = tg3_write32;
14171         tp->write32_tx_mbox = tg3_write32;
14172         tp->write32_rx_mbox = tg3_write32;
14173
14174         /* Various workaround register access methods */
14175         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14176                 tp->write32 = tg3_write_indirect_reg32;
14177         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14178                  (tg3_flag(tp, PCI_EXPRESS) &&
14179                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14180                 /*
14181                  * Back to back register writes can cause problems on these
14182                  * chips, the workaround is to read back all reg writes
14183                  * except those to mailbox regs.
14184                  *
14185                  * See tg3_write_indirect_reg32().
14186                  */
14187                 tp->write32 = tg3_write_flush_reg32;
14188         }
14189
14190         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14191                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14192                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14193                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14194         }
14195
14196         if (tg3_flag(tp, ICH_WORKAROUND)) {
14197                 tp->read32 = tg3_read_indirect_reg32;
14198                 tp->write32 = tg3_write_indirect_reg32;
14199                 tp->read32_mbox = tg3_read_indirect_mbox;
14200                 tp->write32_mbox = tg3_write_indirect_mbox;
14201                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14202                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14203
14204                 iounmap(tp->regs);
14205                 tp->regs = NULL;
14206
14207                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14208                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14209                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14210         }
14211         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14212                 tp->read32_mbox = tg3_read32_mbox_5906;
14213                 tp->write32_mbox = tg3_write32_mbox_5906;
14214                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14215                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14216         }
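
/* All register access elsewhere in the driver goes through these
 * function pointers; the tw32()/tr32() accessors are (roughly, per
 * the macro definitions near the top of this file) wrappers such as:
 *
 *	#define tw32(reg, val)	tp->write32(tp, (reg), (val))
 *	#define tr32(reg)	tp->read32(tp, (reg))
 *
 * so the workaround methods chosen above take effect globally.
 */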
14217
14218         if (tp->write32 == tg3_write_indirect_reg32 ||
14219             (tg3_flag(tp, PCIX_MODE) &&
14220              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14221               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14222                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14223
14224         /* The memory arbiter has to be enabled in order for SRAM accesses
14225          * to succeed.  Normally on powerup the tg3 chip firmware will make
14226          * sure it is enabled, but other entities such as system netboot
14227          * code might disable it.
14228          */
14229         val = tr32(MEMARB_MODE);
14230         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14231
14232         if (tg3_flag(tp, PCIX_MODE)) {
14233                 pci_read_config_dword(tp->pdev,
14234                                       tp->pcix_cap + PCI_X_STATUS, &val);
14235                 tp->pci_fn = val & 0x7;
14236         } else {
14237                 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14238         }
14239
14240         /* Get eeprom hw config before calling tg3_set_power_state().
14241          * In particular, the TG3_FLAG_IS_NIC flag must be
14242          * determined before calling tg3_set_power_state() so that
14243          * we know whether or not to switch out of Vaux power.
14244          * When the flag is set, it means that GPIO1 is used for eeprom
14245          * write protect and also implies that it is a LOM where GPIOs
14246          * are not used to switch power.
14247          */
14248         tg3_get_eeprom_hw_cfg(tp);
14249
14250         if (tg3_flag(tp, ENABLE_APE)) {
14251                 /* Allow reads and writes to the
14252                  * APE register and memory space.
14253                  */
14254                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14255                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14256                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14257                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14258                                        pci_state_reg);
14259
14260                 tg3_ape_lock_init(tp);
14261         }
14262
14263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14264             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14265             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14266             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14267             tg3_flag(tp, 57765_PLUS))
14268                 tg3_flag_set(tp, CPMU_PRESENT);
14269
14270         /* Set up tp->grc_local_ctrl before calling
14271          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14272          * will bring 5700's external PHY out of reset.
14273          * It is also used as eeprom write protect on LOMs.
14274          */
14275         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14276         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14277             tg3_flag(tp, EEPROM_WRITE_PROT))
14278                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14279                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14280         /* Unused GPIO3 must be driven as output on 5752 because there
14281          * are no pull-up resistors on unused GPIO pins.
14282          */
14283         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14284                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14285
14286         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14287             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14288             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14289                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14290
14291         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14292             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14293                 /* Turn off the debug UART. */
14294                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14295                 if (tg3_flag(tp, IS_NIC))
14296                         /* Keep VMain power. */
14297                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14298                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14299         }
14300
14301         /* Switch out of Vaux if it is a NIC */
14302         tg3_pwrsrc_switch_to_vmain(tp);
14303
14304         /* Derive initial jumbo mode from MTU assigned in
14305          * ether_setup() via the alloc_etherdev() call
14306          */
14307         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14308                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14309
14310         /* Determine WakeOnLan speed to use. */
14311         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14312             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14313             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14314             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14315                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14316         } else {
14317                 tg3_flag_set(tp, WOL_SPEED_100MB);
14318         }
14319
14320         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14321                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14322
14323         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14324         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14325             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14326              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14327              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14328             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14329             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14330                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14331
14332         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14333             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14334                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14335         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14336                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14337
14338         if (tg3_flag(tp, 5705_PLUS) &&
14339             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14340             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14341             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14342             !tg3_flag(tp, 57765_PLUS)) {
14343                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14344                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14345                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14346                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14347                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14348                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14349                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14350                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14351                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14352                 } else
14353                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14354         }
14355
14356         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14357             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14358                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14359                 if (tp->phy_otp == 0)
14360                         tp->phy_otp = TG3_OTP_DEFAULT;
14361         }
14362
14363         if (tg3_flag(tp, CPMU_PRESENT))
14364                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14365         else
14366                 tp->mi_mode = MAC_MI_MODE_BASE;
14367
14368         tp->coalesce_mode = 0;
14369         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14370             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14371                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14372
14373         /* Set these bits to enable statistics workaround. */
14374         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14375             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14376             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14377                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14378                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14379         }
14380
14381         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14382             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14383                 tg3_flag_set(tp, USE_PHYLIB);
14384
14385         err = tg3_mdio_init(tp);
14386         if (err)
14387                 return err;
14388
14389         /* Initialize data/descriptor byte/word swapping. */
14390         val = tr32(GRC_MODE);
14391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14392                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14393                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14394                         GRC_MODE_B2HRX_ENABLE |
14395                         GRC_MODE_HTX2B_ENABLE |
14396                         GRC_MODE_HOST_STACKUP);
14397         else
14398                 val &= GRC_MODE_HOST_STACKUP;
14399
14400         tw32(GRC_MODE, val | tp->grc_mode);
14401
14402         tg3_switch_clocks(tp);
14403
14404         /* Clear this out for sanity. */
14405         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14406
14407         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14408                               &pci_state_reg);
14409         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14410             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14411                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14412
14413                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14414                     chiprevid == CHIPREV_ID_5701_B0 ||
14415                     chiprevid == CHIPREV_ID_5701_B2 ||
14416                     chiprevid == CHIPREV_ID_5701_B5) {
14417                         void __iomem *sram_base;
14418
14419                         /* Write some dummy words into the SRAM status block
14420                          * area and see if they read back correctly.  If the
14421                          * readback is bad, force enable the PCIX workaround.
14422                          */
14423                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14424
14425                         writel(0x00000000, sram_base);
14426                         writel(0x00000000, sram_base + 4);
14427                         writel(0xffffffff, sram_base + 4);
14428                         if (readl(sram_base) != 0x00000000)
14429                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14430                 }
14431         }
14432
14433         udelay(50);
14434         tg3_nvram_init(tp);
14435
14436         grc_misc_cfg = tr32(GRC_MISC_CFG);
14437         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14438
14439         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14440             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14441              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14442                 tg3_flag_set(tp, IS_5788);
14443
14444         if (!tg3_flag(tp, IS_5788) &&
14445             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14446                 tg3_flag_set(tp, TAGGED_STATUS);
14447         if (tg3_flag(tp, TAGGED_STATUS)) {
14448                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14449                                       HOSTCC_MODE_CLRTICK_TXBD);
14450
14451                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14452                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14453                                        tp->misc_host_ctrl);
14454         }
14455
14456         /* Preserve the APE MAC_MODE bits */
14457         if (tg3_flag(tp, ENABLE_APE))
14458                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14459         else
14460                 tp->mac_mode = 0;
14461
14462         /* these are limited to 10/100 only */
14463         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14464              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14465             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14466              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14467              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14468               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14469               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14470             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14471              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14472               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14473               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14474             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14475             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14476             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14477             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14478                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14479
14480         err = tg3_phy_probe(tp);
14481         if (err) {
14482                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14483                 /* ... but do not return immediately ... */
14484                 tg3_mdio_fini(tp);
14485         }
14486
14487         tg3_read_vpd(tp);
14488         tg3_read_fw_ver(tp);
14489
14490         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14491                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14492         } else {
14493                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14494                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14495                 else
14496                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14497         }
14498
14499         /* 5700 {AX,BX} chips have a broken status block link
14500          * change bit implementation, so we must use the
14501          * status register in those cases.
14502          */
14503         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14504                 tg3_flag_set(tp, USE_LINKCHG_REG);
14505         else
14506                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14507
14508         /* The led_ctrl is set during tg3_phy_probe, here we might
14509          * have to force the link status polling mechanism based
14510          * upon subsystem IDs.
14511          */
14512         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14513             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14514             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14515                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14516                 tg3_flag_set(tp, USE_LINKCHG_REG);
14517         }
14518
14519         /* For all SERDES we poll the MAC status register. */
14520         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14521                 tg3_flag_set(tp, POLL_SERDES);
14522         else
14523                 tg3_flag_clear(tp, POLL_SERDES);
14524
14525         tp->rx_offset = NET_IP_ALIGN;
14526         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14527         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14528             tg3_flag(tp, PCIX_MODE)) {
14529                 tp->rx_offset = 0;
14530 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14531                 tp->rx_copy_thresh = ~(u16)0;
14532 #endif
14533         }
14534
14535         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14536         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14537         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14538
14539         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14540
14541         /* Increment the rx prod index on the rx std ring by at most
14542          * 8 for these chips to work around hw errata.
14543          */
14544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14546             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14547                 tp->rx_std_max_post = 8;
14548
14549         if (tg3_flag(tp, ASPM_WORKAROUND))
14550                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14551                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14552
14553         return err;
14554 }
14555
14556 #ifdef CONFIG_SPARC
14557 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14558 {
14559         struct net_device *dev = tp->dev;
14560         struct pci_dev *pdev = tp->pdev;
14561         struct device_node *dp = pci_device_to_OF_node(pdev);
14562         const unsigned char *addr;
14563         int len;
14564
14565         addr = of_get_property(dp, "local-mac-address", &len);
14566         if (addr && len == 6) {
14567                 memcpy(dev->dev_addr, addr, 6);
14568                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14569                 return 0;
14570         }
14571         return -ENODEV;
14572 }
14573
14574 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14575 {
14576         struct net_device *dev = tp->dev;
14577
14578         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14579         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14580         return 0;
14581 }
14582 #endif
14583
14584 static int __devinit tg3_get_device_address(struct tg3 *tp)
14585 {
14586         struct net_device *dev = tp->dev;
14587         u32 hi, lo, mac_offset;
14588         int addr_ok = 0;
14589
14590 #ifdef CONFIG_SPARC
14591         if (!tg3_get_macaddr_sparc(tp))
14592                 return 0;
14593 #endif
14594
14595         mac_offset = 0x7c;
14596         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14597             tg3_flag(tp, 5780_CLASS)) {
14598                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14599                         mac_offset = 0xcc;
14600                 if (tg3_nvram_lock(tp))
14601                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14602                 else
14603                         tg3_nvram_unlock(tp);
14604         } else if (tg3_flag(tp, 5717_PLUS)) {
14605                 if (tp->pci_fn & 1)
14606                         mac_offset = 0xcc;
14607                 if (tp->pci_fn > 1)
14608                         mac_offset += 0x18c;
14609         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14610                 mac_offset = 0x10;
14611
14612         /* First try the MAC address mailbox; bootcode stamps the ASCII signature "HK" (0x484b) in the high word when a valid address is present. */
14613         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14614         if ((hi >> 16) == 0x484b) {
14615                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14616                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14617
14618                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14619                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14620                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14621                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14622                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14623
14624                 /* Some old bootcode may report a 0 MAC address in SRAM */
14625                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14626         }
14627         if (!addr_ok) {
14628                 /* Next, try NVRAM. */
14629                 if (!tg3_flag(tp, NO_NVRAM) &&
14630                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14631                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14632                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14633                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14634                 }
14635                 /* Finally just fetch it out of the MAC control regs. */
14636                 else {
14637                         hi = tr32(MAC_ADDR_0_HIGH);
14638                         lo = tr32(MAC_ADDR_0_LOW);
14639
14640                         dev->dev_addr[5] = lo & 0xff;
14641                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14642                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14643                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14644                         dev->dev_addr[1] = hi & 0xff;
14645                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14646                 }
14647         }
14648
14649         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14650 #ifdef CONFIG_SPARC
14651                 if (!tg3_get_default_macaddr_sparc(tp))
14652                         return 0;
14653 #endif
14654                 return -EINVAL;
14655         }
14656         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14657         return 0;
14658 }
14659
14660 #define BOUNDARY_SINGLE_CACHELINE       1
14661 #define BOUNDARY_MULTI_CACHELINE        2
14662
14663 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14664 {
14665         int cacheline_size;
14666         u8 byte;
14667         int goal;
14668
14669         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14670         if (byte == 0)
14671                 cacheline_size = 1024;
14672         else
14673                 cacheline_size = (int) byte * 4;
14674
14675         /* On 5703 and later chips, the boundary bits have no
14676          * effect.
14677          */
14678         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14679             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14680             !tg3_flag(tp, PCI_EXPRESS))
14681                 goto out;
14682
14683 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14684         goal = BOUNDARY_MULTI_CACHELINE;
14685 #else
14686 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14687         goal = BOUNDARY_SINGLE_CACHELINE;
14688 #else
14689         goal = 0;
14690 #endif
14691 #endif
14692
14693         if (tg3_flag(tp, 57765_PLUS)) {
14694                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14695                 goto out;
14696         }
14697
14698         if (!goal)
14699                 goto out;
14700
14701         /* PCI controllers on most RISC systems tend to disconnect
14702          * when a device tries to burst across a cache-line boundary.
14703          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14704          *
14705          * Unfortunately, for PCI-E there are only limited
14706          * write-side controls for this, and thus for reads
14707          * we will still get the disconnects.  We'll also waste
14708          * these PCI cycles for both read and write for chips
14709          * other than 5700 and 5701 which do not implement the
14710          * boundary bits.
14711          */
14712         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14713                 switch (cacheline_size) {
14714                 case 16:
14715                 case 32:
14716                 case 64:
14717                 case 128:
14718                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14719                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14720                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14721                         } else {
14722                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14723                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14724                         }
14725                         break;
14726
14727                 case 256:
14728                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14729                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14730                         break;
14731
14732                 default:
14733                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14734                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14735                         break;
14736                 }
14737         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14738                 switch (cacheline_size) {
14739                 case 16:
14740                 case 32:
14741                 case 64:
14742                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14743                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14744                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14745                                 break;
14746                         }
14747                         /* fallthrough */
14748                 case 128:
14749                 default:
14750                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14751                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14752                         break;
14753                 }
14754         } else {
14755                 switch (cacheline_size) {
14756                 case 16:
14757                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14758                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14759                                         DMA_RWCTRL_WRITE_BNDRY_16);
14760                                 break;
14761                         }
14762                         /* fallthrough */
14763                 case 32:
14764                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14765                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14766                                         DMA_RWCTRL_WRITE_BNDRY_32);
14767                                 break;
14768                         }
14769                         /* fallthrough */
14770                 case 64:
14771                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14772                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14773                                         DMA_RWCTRL_WRITE_BNDRY_64);
14774                                 break;
14775                         }
14776                         /* fallthrough */
14777                 case 128:
14778                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14779                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14780                                         DMA_RWCTRL_WRITE_BNDRY_128);
14781                                 break;
14782                         }
14783                         /* fallthrough */
14784                 case 256:
14785                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14786                                 DMA_RWCTRL_WRITE_BNDRY_256);
14787                         break;
14788                 case 512:
14789                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14790                                 DMA_RWCTRL_WRITE_BNDRY_512);
14791                         break;
14792                 case 1024:
14793                 default:
14794                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14795                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14796                         break;
14797                 }
14798         }
14799
14800 out:
14801         return val;
14802 }
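
/* tg3_test_dma() below seeds dma_rwctrl with the PCI command codes and
 * then lets this function fold in the boundary bits:
 *
 *	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 *
 * Per the comment above, only 5700/5701 (and, for writes, PCIe parts)
 * actually honor the boundary bits.
 */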
14803
14804 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14805 {
14806         struct tg3_internal_buffer_desc test_desc;
14807         u32 sram_dma_descs;
14808         int i, ret;
14809
14810         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14811
14812         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14813         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14814         tw32(RDMAC_STATUS, 0);
14815         tw32(WDMAC_STATUS, 0);
14816
14817         tw32(BUFMGR_MODE, 0);
14818         tw32(FTQ_RESET, 0);
14819
14820         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14821         test_desc.addr_lo = buf_dma & 0xffffffff;
14822         test_desc.nic_mbuf = 0x00002100;
14823         test_desc.len = size;
14824
14825         /*
14826          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14827          * the *second* time the tg3 driver was getting loaded after an
14828          * initial scan.
14829          *
14830          * Broadcom tells me:
14831          *   ...the DMA engine is connected to the GRC block and a DMA
14832          *   reset may affect the GRC block in some unpredictable way...
14833          *   The behavior of resets to individual blocks has not been tested.
14834          *
14835          * Broadcom noted the GRC reset will also reset all sub-components.
14836          */
14837         if (to_device) {
14838                 test_desc.cqid_sqid = (13 << 8) | 2;
14839
14840                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14841                 udelay(40);
14842         } else {
14843                 test_desc.cqid_sqid = (16 << 8) | 7;
14844
14845                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14846                 udelay(40);
14847         }
14848         test_desc.flags = 0x00000005;
14849
14850         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14851                 u32 val;
14852
14853                 val = *(((u32 *)&test_desc) + i);
14854                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14855                                        sram_dma_descs + (i * sizeof(u32)));
14856                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14857         }
14858         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14859
14860         if (to_device)
14861                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14862         else
14863                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14864
14865         ret = -ENODEV;
14866         for (i = 0; i < 40; i++) {
14867                 u32 val;
14868
14869                 if (to_device)
14870                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14871                 else
14872                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14873                 if ((val & 0xffff) == sram_dma_descs) {
14874                         ret = 0;
14875                         break;
14876                 }
14877
14878                 udelay(100);
14879         }
14880
14881         return ret;
14882 }
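
/* tg3_test_dma() drives this helper in both directions over the same
 * coherent buffer:
 *
 *	tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);   (host -> chip)
 *	tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);   (chip -> host)
 *
 * then verifies the pattern to expose the 5700/5701 write DMA bug.
 */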
14883
14884 #define TEST_BUFFER_SIZE        0x2000
14885
14886 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14887         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14888         { },
14889 };
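
/* Presumably consulted, like the write-reorder table above, via
 * pci_dev_present() later in tg3_test_dma(): Apple UniNorth bridges
 * need a more conservative DMA setting even when the test passes.
 */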
14890
14891 static int __devinit tg3_test_dma(struct tg3 *tp)
14892 {
14893         dma_addr_t buf_dma;
14894         u32 *buf, saved_dma_rwctrl;
14895         int ret = 0;
14896
14897         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14898                                  &buf_dma, GFP_KERNEL);
14899         if (!buf) {
14900                 ret = -ENOMEM;
14901                 goto out_nofree;
14902         }
14903
14904         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14905                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14906
14907         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14908
14909         if (tg3_flag(tp, 57765_PLUS))
14910                 goto out;
14911
14912         if (tg3_flag(tp, PCI_EXPRESS)) {
14913                 /* DMA read watermark not used on PCIE */
14914                 tp->dma_rwctrl |= 0x00180000;
14915         } else if (!tg3_flag(tp, PCIX_MODE)) {
14916                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14917                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14918                         tp->dma_rwctrl |= 0x003f0000;
14919                 else
14920                         tp->dma_rwctrl |= 0x003f000f;
14921         } else {
14922                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14923                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14924                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14925                         u32 read_water = 0x7;
14926
14927                         /* If the 5704 is behind the EPB bridge, we can
14928                          * do the less restrictive ONE_DMA workaround for
14929                          * better performance.
14930                          */
14931                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14932                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14933                                 tp->dma_rwctrl |= 0x8000;
14934                         else if (ccval == 0x6 || ccval == 0x7)
14935                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14936
14937                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14938                                 read_water = 4;
14939                         /* Set bit 23 to enable PCIX hw bug fix */
14940                         tp->dma_rwctrl |=
14941                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14942                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14943                                 (1 << 23);
14944                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14945                         /* 5780 always in PCIX mode */
14946                         tp->dma_rwctrl |= 0x00144000;
14947                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14948                         /* 5714 always in PCIX mode */
14949                         tp->dma_rwctrl |= 0x00148000;
14950                 } else {
14951                         tp->dma_rwctrl |= 0x001b000f;
14952                 }
14953         }
14954
14955         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14956             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14957                 tp->dma_rwctrl &= 0xfffffff0;
14958
14959         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14960             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14961                 /* Remove this if it causes problems for some boards. */
14962                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14963
14964                 /* On 5700/5701 chips, we need to set this bit.
14965                  * Otherwise the chip will issue cacheline transactions
14966                  * to streamable DMA memory with not all the byte
14967                  * enables turned on.  This is an error on several
14968                  * RISC PCI controllers, in particular sparc64.
14969                  *
14970                  * On 5703/5704 chips, this bit has been reassigned
14971                  * a different meaning.  In particular, it is used
14972                  * on those chips to enable a PCI-X workaround.
14973                  */
14974                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14975         }
14976
14977         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14978
14979 #if 0
14980         /* Unneeded, already done by tg3_get_invariants.  */
14981         tg3_switch_clocks(tp);
14982 #endif
14983
14984         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14985             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14986                 goto out;
14987
14988         /* It is best to perform DMA test with maximum write burst size
14989          * to expose the 5700/5701 write DMA bug.
14990          */
14991         saved_dma_rwctrl = tp->dma_rwctrl;
14992         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14993         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14994
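              /* DMA test loop: fill the buffer with an incrementing
               * pattern, DMA it to the chip and back, and verify it.  On a
               * mismatch, retry once with a 16-byte write boundary before
               * declaring the device unusable.
               */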
14995         while (1) {
14996                 u32 *p = buf, i;
14997
14998                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14999                         p[i] = i;
15000
15001                 /* Send the buffer to the chip. */
15002                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15003                 if (ret) {
15004                         dev_err(&tp->pdev->dev,
15005                                 "%s: Buffer write failed. err = %d\n",
15006                                 __func__, ret);
15007                         break;
15008                 }
15009
15010 #if 0
15011                 /* validate data reached card RAM correctly. */
15012                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15013                         u32 val;
15014                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15015                         if (le32_to_cpu(val) != p[i]) {
15016                                 dev_err(&tp->pdev->dev,
15017                                         "%s: Buffer corrupted on device! "
15018                                         "(%d != %d)\n", __func__, val, i);
15019                                 /* ret = -ENODEV here? */
15020                         }
15021                         p[i] = 0;
15022                 }
15023 #endif
15024                 /* Now read it back. */
15025                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15026                 if (ret) {
15027                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15028                                 "err = %d\n", __func__, ret);
15029                         break;
15030                 }
15031
15032                 /* Verify it. */
15033                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15034                         if (p[i] == i)
15035                                 continue;
15036
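                              /* First mismatch: clamp the DMA write
                               * boundary to 16 bytes and rerun the test.
                               */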
15037                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15038                             DMA_RWCTRL_WRITE_BNDRY_16) {
15039                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15040                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15041                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15042                                 break;
15043                         } else {
15044                                 dev_err(&tp->pdev->dev,
15045                                         "%s: Buffer corrupted on read back! "
15046                                         "(%d != %d)\n", __func__, p[i], i);
15047                                 ret = -ENODEV;
15048                                 goto out;
15049                         }
15050                 }
15051
15052                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15053                         /* Success. */
15054                         ret = 0;
15055                         break;
15056                 }
15057         }
15058         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15059             DMA_RWCTRL_WRITE_BNDRY_16) {
15060                 /* DMA test passed without adjusting DMA boundary,
15061                  * now look for chipsets that are known to expose the
15062                  * DMA bug without failing the test.
15063                  */
15064                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15065                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15066                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15067                 } else {
15068                         /* Safe to use the calculated DMA boundary. */
15069                         tp->dma_rwctrl = saved_dma_rwctrl;
15070                 }
15071
15072                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15073         }
15074
15075 out:
15076         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15077 out_nofree:
15078         return ret;
15079 }
15080
15081 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15082 {
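              /* Choose mbuf pool watermarks appropriate to the chip
               * generation.
               */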
15083         if (tg3_flag(tp, 57765_PLUS)) {
15084                 tp->bufmgr_config.mbuf_read_dma_low_water =
15085                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15086                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15087                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15088                 tp->bufmgr_config.mbuf_high_water =
15089                         DEFAULT_MB_HIGH_WATER_57765;
15090
15091                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15092                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15093                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15094                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15095                 tp->bufmgr_config.mbuf_high_water_jumbo =
15096                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15097         } else if (tg3_flag(tp, 5705_PLUS)) {
15098                 tp->bufmgr_config.mbuf_read_dma_low_water =
15099                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15100                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15101                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15102                 tp->bufmgr_config.mbuf_high_water =
15103                         DEFAULT_MB_HIGH_WATER_5705;
15104                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15105                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15106                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15107                         tp->bufmgr_config.mbuf_high_water =
15108                                 DEFAULT_MB_HIGH_WATER_5906;
15109                 }
15110
15111                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15112                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15113                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15114                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15115                 tp->bufmgr_config.mbuf_high_water_jumbo =
15116                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15117         } else {
15118                 tp->bufmgr_config.mbuf_read_dma_low_water =
15119                         DEFAULT_MB_RDMA_LOW_WATER;
15120                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15121                         DEFAULT_MB_MACRX_LOW_WATER;
15122                 tp->bufmgr_config.mbuf_high_water =
15123                         DEFAULT_MB_HIGH_WATER;
15124
15125                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15126                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15127                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15128                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15129                 tp->bufmgr_config.mbuf_high_water_jumbo =
15130                         DEFAULT_MB_HIGH_WATER_JUMBO;
15131         }
15132
15133         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15134         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15135 }
15136
15137 static char * __devinit tg3_phy_string(struct tg3 *tp)
15138 {
15139         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15140         case TG3_PHY_ID_BCM5400:        return "5400";
15141         case TG3_PHY_ID_BCM5401:        return "5401";
15142         case TG3_PHY_ID_BCM5411:        return "5411";
15143         case TG3_PHY_ID_BCM5701:        return "5701";
15144         case TG3_PHY_ID_BCM5703:        return "5703";
15145         case TG3_PHY_ID_BCM5704:        return "5704";
15146         case TG3_PHY_ID_BCM5705:        return "5705";
15147         case TG3_PHY_ID_BCM5750:        return "5750";
15148         case TG3_PHY_ID_BCM5752:        return "5752";
15149         case TG3_PHY_ID_BCM5714:        return "5714";
15150         case TG3_PHY_ID_BCM5780:        return "5780";
15151         case TG3_PHY_ID_BCM5755:        return "5755";
15152         case TG3_PHY_ID_BCM5787:        return "5787";
15153         case TG3_PHY_ID_BCM5784:        return "5784";
15154         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15155         case TG3_PHY_ID_BCM5906:        return "5906";
15156         case TG3_PHY_ID_BCM5761:        return "5761";
15157         case TG3_PHY_ID_BCM5718C:       return "5718C";
15158         case TG3_PHY_ID_BCM5718S:       return "5718S";
15159         case TG3_PHY_ID_BCM57765:       return "57765";
15160         case TG3_PHY_ID_BCM5719C:       return "5719C";
15161         case TG3_PHY_ID_BCM5720C:       return "5720C";
15162         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15163         case 0:                 return "serdes";
15164         default:                return "unknown";
15165         }
15166 }
15167
15168 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15169 {
15170         if (tg3_flag(tp, PCI_EXPRESS)) {
15171                 strcpy(str, "PCI Express");
15172                 return str;
15173         } else if (tg3_flag(tp, PCIX_MODE)) {
15174                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15175
15176                 strcpy(str, "PCIX:");
15177
15178                 if ((clock_ctrl == 7) ||
15179                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15180                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15181                         strcat(str, "133MHz");
15182                 else if (clock_ctrl == 0)
15183                         strcat(str, "33MHz");
15184                 else if (clock_ctrl == 2)
15185                         strcat(str, "50MHz");
15186                 else if (clock_ctrl == 4)
15187                         strcat(str, "66MHz");
15188                 else if (clock_ctrl == 6)
15189                         strcat(str, "100MHz");
15190         } else {
15191                 strcpy(str, "PCI:");
15192                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15193                         strcat(str, "66MHz");
15194                 else
15195                         strcat(str, "33MHz");
15196         }
15197         if (tg3_flag(tp, PCI_32BIT))
15198                 strcat(str, ":32-bit");
15199         else
15200                 strcat(str, ":64-bit");
15201         return str;
15202 }
15203
15204 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15205 {
15206         struct pci_dev *peer;
15207         unsigned int func, devnr = tp->pdev->devfn & ~7;
15208
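              /* Walk the other functions in this slot looking for the
               * 5704's sibling port.
               */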
15209         for (func = 0; func < 8; func++) {
15210                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15211                 if (peer && peer != tp->pdev)
15212                         break;
15213                 pci_dev_put(peer);
15214         }
15215         /* 5704 can be configured in single-port mode; set peer to
15216          * tp->pdev in that case.
15217          */
15218         if (!peer) {
15219                 peer = tp->pdev;
15220                 return peer;
15221         }
15222
15223         /*
15224          * We don't need to keep the refcount elevated; there's no way
15225          * to remove one half of this device without removing the other.
15226          */
15227         pci_dev_put(peer);
15228
15229         return peer;
15230 }
15231
15232 static void __devinit tg3_init_coal(struct tg3 *tp)
15233 {
15234         struct ethtool_coalesce *ec = &tp->coal;
15235
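              /* Program the default coalescing parameters; these are
               * tweaked below for CLRTICK-mode chips and for 5705+ parts.
               */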
15236         memset(ec, 0, sizeof(*ec));
15237         ec->cmd = ETHTOOL_GCOALESCE;
15238         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15239         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15240         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15241         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15242         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15243         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15244         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15245         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15246         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15247
15248         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15249                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15250                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15251                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15252                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15253                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15254         }
15255
15256         if (tg3_flag(tp, 5705_PLUS)) {
15257                 ec->rx_coalesce_usecs_irq = 0;
15258                 ec->tx_coalesce_usecs_irq = 0;
15259                 ec->stats_block_coalesce_usecs = 0;
15260         }
15261 }
15262
15263 static const struct net_device_ops tg3_netdev_ops = {
15264         .ndo_open               = tg3_open,
15265         .ndo_stop               = tg3_close,
15266         .ndo_start_xmit         = tg3_start_xmit,
15267         .ndo_get_stats64        = tg3_get_stats64,
15268         .ndo_validate_addr      = eth_validate_addr,
15269         .ndo_set_rx_mode        = tg3_set_rx_mode,
15270         .ndo_set_mac_address    = tg3_set_mac_addr,
15271         .ndo_do_ioctl           = tg3_ioctl,
15272         .ndo_tx_timeout         = tg3_tx_timeout,
15273         .ndo_change_mtu         = tg3_change_mtu,
15274         .ndo_fix_features       = tg3_fix_features,
15275         .ndo_set_features       = tg3_set_features,
15276 #ifdef CONFIG_NET_POLL_CONTROLLER
15277         .ndo_poll_controller    = tg3_poll_controller,
15278 #endif
15279 };
15280
15281 static int __devinit tg3_init_one(struct pci_dev *pdev,
15282                                   const struct pci_device_id *ent)
15283 {
15284         struct net_device *dev;
15285         struct tg3 *tp;
15286         int i, err, pm_cap;
15287         u32 sndmbx, rcvmbx, intmbx;
15288         char str[40];
15289         u64 dma_mask, persist_dma_mask;
15290         u32 features = 0;
15291
15292         printk_once(KERN_INFO "%s\n", version);
15293
15294         err = pci_enable_device(pdev);
15295         if (err) {
15296                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15297                 return err;
15298         }
15299
15300         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15301         if (err) {
15302                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15303                 goto err_out_disable_pdev;
15304         }
15305
15306         pci_set_master(pdev);
15307
15308         /* Find power-management capability. */
15309         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15310         if (pm_cap == 0) {
15311                 dev_err(&pdev->dev,
15312                         "Cannot find Power Management capability, aborting\n");
15313                 err = -EIO;
15314                 goto err_out_free_res;
15315         }
15316
15317         err = pci_set_power_state(pdev, PCI_D0);
15318         if (err) {
15319                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15320                 goto err_out_free_res;
15321         }
15322
15323         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15324         if (!dev) {
15325                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15326                 err = -ENOMEM;
15327                 goto err_out_power_down;
15328         }
15329
15330         SET_NETDEV_DEV(dev, &pdev->dev);
15331
15332         tp = netdev_priv(dev);
15333         tp->pdev = pdev;
15334         tp->dev = dev;
15335         tp->pm_cap = pm_cap;
15336         tp->rx_mode = TG3_DEF_RX_MODE;
15337         tp->tx_mode = TG3_DEF_TX_MODE;
15338
15339         if (tg3_debug > 0)
15340                 tp->msg_enable = tg3_debug;
15341         else
15342                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15343
15344         /* The word/byte swap controls here control register access byte
15345          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15346          * setting below.
15347          */
15348         tp->misc_host_ctrl =
15349                 MISC_HOST_CTRL_MASK_PCI_INT |
15350                 MISC_HOST_CTRL_WORD_SWAP |
15351                 MISC_HOST_CTRL_INDIR_ACCESS |
15352                 MISC_HOST_CTRL_PCISTATE_RW;
15353
15354         /* The NONFRM (non-frame) byte/word swap controls take effect
15355          * on descriptor entries, anything which isn't packet data.
15356          *
15357          * The StrongARM chips on the board (one for tx, one for rx)
15358          * are running in big-endian mode.
15359          */
15360         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15361                         GRC_MODE_WSWAP_NONFRM_DATA);
15362 #ifdef __BIG_ENDIAN
15363         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15364 #endif
15365         spin_lock_init(&tp->lock);
15366         spin_lock_init(&tp->indirect_lock);
15367         INIT_WORK(&tp->reset_task, tg3_reset_task);
15368
15369         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15370         if (!tp->regs) {
15371                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15372                 err = -ENOMEM;
15373                 goto err_out_free_dev;
15374         }
15375
15376         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15377             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15378             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15379             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15380             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15381             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15382             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15383             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15384                 tg3_flag_set(tp, ENABLE_APE);
15385                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15386                 if (!tp->aperegs) {
15387                         dev_err(&pdev->dev,
15388                                 "Cannot map APE registers, aborting\n");
15389                         err = -ENOMEM;
15390                         goto err_out_iounmap;
15391                 }
15392         }
15393
15394         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15395         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15396
15397         dev->ethtool_ops = &tg3_ethtool_ops;
15398         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15399         dev->netdev_ops = &tg3_netdev_ops;
15400         dev->irq = pdev->irq;
15401
15402         err = tg3_get_invariants(tp);
15403         if (err) {
15404                 dev_err(&pdev->dev,
15405                         "Problem fetching invariants of chip, aborting\n");
15406                 goto err_out_apeunmap;
15407         }
15408
15409         /* The EPB bridge inside the 5714, 5715, and 5780, and any
15410          * device behind the EPB, cannot support DMA addresses > 40 bits.
15411          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15412          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15413          * do DMA address check in tg3_start_xmit().
15414          */
15415         if (tg3_flag(tp, IS_5788))
15416                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15417         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15418                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15419 #ifdef CONFIG_HIGHMEM
15420                 dma_mask = DMA_BIT_MASK(64);
15421 #endif
15422         } else
15423                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15424
15425         /* Configure DMA attributes. */
15426         if (dma_mask > DMA_BIT_MASK(32)) {
15427                 err = pci_set_dma_mask(pdev, dma_mask);
15428                 if (!err) {
15429                         features |= NETIF_F_HIGHDMA;
15430                         err = pci_set_consistent_dma_mask(pdev,
15431                                                           persist_dma_mask);
15432                         if (err < 0) {
15433                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15434                                         "DMA for consistent allocations\n");
15435                                 goto err_out_apeunmap;
15436                         }
15437                 }
15438         }
15439         if (err || dma_mask == DMA_BIT_MASK(32)) {
15440                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15441                 if (err) {
15442                         dev_err(&pdev->dev,
15443                                 "No usable DMA configuration, aborting\n");
15444                         goto err_out_apeunmap;
15445                 }
15446         }
15447
15448         tg3_init_bufmgr_config(tp);
15449
15450         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15451
15452         /* 5700 B0 chips do not support checksumming correctly due
15453          * to hardware bugs.
15454          */
15455         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15456                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15457
15458                 if (tg3_flag(tp, 5755_PLUS))
15459                         features |= NETIF_F_IPV6_CSUM;
15460         }
15461
15462         /* TSO is on by default on chips that support hardware TSO.
15463          * Firmware TSO on older chips gives lower performance, so it
15464          * is off by default, but can be enabled using ethtool.
15465          */
15466         if ((tg3_flag(tp, HW_TSO_1) ||
15467              tg3_flag(tp, HW_TSO_2) ||
15468              tg3_flag(tp, HW_TSO_3)) &&
15469             (features & NETIF_F_IP_CSUM))
15470                 features |= NETIF_F_TSO;
15471         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15472                 if (features & NETIF_F_IPV6_CSUM)
15473                         features |= NETIF_F_TSO6;
15474                 if (tg3_flag(tp, HW_TSO_3) ||
15475                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15476                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15477                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15478                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15479                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15480                         features |= NETIF_F_TSO_ECN;
15481         }
15482
15483         dev->features |= features;
15484         dev->vlan_features |= features;
15485
15486         /*
15487          * Add loopback capability only for a subset of devices that support
15488          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15489          * loopback for the remaining devices.
15490          */
15491         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15492             !tg3_flag(tp, CPMU_PRESENT))
15493                 /* Add the loopback capability */
15494                 features |= NETIF_F_LOOPBACK;
15495
15496         dev->hw_features |= features;
15497
15498         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15499             !tg3_flag(tp, TSO_CAPABLE) &&
15500             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15501                 tg3_flag_set(tp, MAX_RXPEND_64);
15502                 tp->rx_pending = 63;
15503         }
15504
15505         err = tg3_get_device_address(tp);
15506         if (err) {
15507                 dev_err(&pdev->dev,
15508                         "Could not obtain valid ethernet address, aborting\n");
15509                 goto err_out_apeunmap;
15510         }
15511
15512         /*
15513          * Reset chip in case the UNDI or EFI driver did not shut down
15514          * DMA.  The self test will enable the WDMAC and we would then
15515          * see (spurious) pending DMA on the PCI bus at that point.
15516          */
15517         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15518             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15519                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15520                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15521         }
15522
15523         err = tg3_test_dma(tp);
15524         if (err) {
15525                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15526                 goto err_out_apeunmap;
15527         }
15528
15529         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15530         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15531         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15532         for (i = 0; i < tp->irq_max; i++) {
15533                 struct tg3_napi *tnapi = &tp->napi[i];
15534
15535                 tnapi->tp = tp;
15536                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15537
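                      /* Interrupt mailboxes sit 8 bytes apart for the
                       * first vectors and at a 4-byte stride beyond that
                       * in the register map, hence the two increments
                       * below.
                       */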
15538                 tnapi->int_mbox = intmbx;
15539                 if (i <= 4)
15540                         intmbx += 0x8;
15541                 else
15542                         intmbx += 0x4;
15543
15544                 tnapi->consmbox = rcvmbx;
15545                 tnapi->prodmbox = sndmbx;
15546
15547                 if (i)
15548                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15549                 else
15550                         tnapi->coal_now = HOSTCC_MODE_NOW;
15551
15552                 if (!tg3_flag(tp, SUPPORT_MSIX))
15553                         break;
15554
15555                 /*
15556                  * If we support MSIX, we'll be using RSS.  If we're using
15557                  * RSS, the first vector only handles link interrupts and the
15558                  * remaining vectors handle rx and tx interrupts.  Reuse the
15559                  * mailbox values for the next iteration.  The values we set up
15560                  * above are still useful for the single-vectored mode.
15561                  */
15562                 if (!i)
15563                         continue;
15564
15565                 rcvmbx += 0x8;
15566
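                      /* The send producer mailboxes are not laid out
                       * linearly in the register map; step through them
                       * in the order the hardware expects.
                       */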
15567                 if (sndmbx & 0x4)
15568                         sndmbx -= 0x4;
15569                 else
15570                         sndmbx += 0xc;
15571         }
15572
15573         tg3_init_coal(tp);
15574
15575         pci_set_drvdata(pdev, dev);
15576
15577         if (tg3_flag(tp, 5717_PLUS)) {
15578                 /* Resume from a low-power mode */
15579                 tg3_frob_aux_power(tp, false);
15580         }
15581
15582         err = register_netdev(dev);
15583         if (err) {
15584                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15585                 goto err_out_apeunmap;
15586         }
15587
15588         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15589                     tp->board_part_number,
15590                     tp->pci_chip_rev_id,
15591                     tg3_bus_string(tp, str),
15592                     dev->dev_addr);
15593
15594         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15595                 struct phy_device *phydev;
15596                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15597                 netdev_info(dev,
15598                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15599                             phydev->drv->name, dev_name(&phydev->dev));
15600         } else {
15601                 char *ethtype;
15602
15603                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15604                         ethtype = "10/100Base-TX";
15605                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15606                         ethtype = "1000Base-SX";
15607                 else
15608                         ethtype = "10/100/1000Base-T";
15609
15610                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15611                             "(WireSpeed[%d], EEE[%d])\n",
15612                             tg3_phy_string(tp), ethtype,
15613                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15614                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15615         }
15616
15617         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15618                     (dev->features & NETIF_F_RXCSUM) != 0,
15619                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15620                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15621                     tg3_flag(tp, ENABLE_ASF) != 0,
15622                     tg3_flag(tp, TSO_CAPABLE) != 0);
15623         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15624                     tp->dma_rwctrl,
15625                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15626                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15627
15628         pci_save_state(pdev);
15629
15630         return 0;
15631
15632 err_out_apeunmap:
15633         if (tp->aperegs) {
15634                 iounmap(tp->aperegs);
15635                 tp->aperegs = NULL;
15636         }
15637
15638 err_out_iounmap:
15639         if (tp->regs) {
15640                 iounmap(tp->regs);
15641                 tp->regs = NULL;
15642         }
15643
15644 err_out_free_dev:
15645         free_netdev(dev);
15646
15647 err_out_power_down:
15648         pci_set_power_state(pdev, PCI_D3hot);
15649
15650 err_out_free_res:
15651         pci_release_regions(pdev);
15652
15653 err_out_disable_pdev:
15654         pci_disable_device(pdev);
15655         pci_set_drvdata(pdev, NULL);
15656         return err;
15657 }
15658
15659 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15660 {
15661         struct net_device *dev = pci_get_drvdata(pdev);
15662
15663         if (dev) {
15664                 struct tg3 *tp = netdev_priv(dev);
15665
15666                 if (tp->fw)
15667                         release_firmware(tp->fw);
15668
15669                 cancel_work_sync(&tp->reset_task);
15670
15671                 if (tg3_flag(tp, USE_PHYLIB)) {
15672                         tg3_phy_fini(tp);
15673                         tg3_mdio_fini(tp);
15674                 }
15675
15676                 unregister_netdev(dev);
15677                 if (tp->aperegs) {
15678                         iounmap(tp->aperegs);
15679                         tp->aperegs = NULL;
15680                 }
15681                 if (tp->regs) {
15682                         iounmap(tp->regs);
15683                         tp->regs = NULL;
15684                 }
15685                 free_netdev(dev);
15686                 pci_release_regions(pdev);
15687                 pci_disable_device(pdev);
15688                 pci_set_drvdata(pdev, NULL);
15689         }
15690 }
15691
15692 #ifdef CONFIG_PM_SLEEP
15693 static int tg3_suspend(struct device *device)
15694 {
15695         struct pci_dev *pdev = to_pci_dev(device);
15696         struct net_device *dev = pci_get_drvdata(pdev);
15697         struct tg3 *tp = netdev_priv(dev);
15698         int err;
15699
15700         if (!netif_running(dev))
15701                 return 0;
15702
15703         flush_work_sync(&tp->reset_task);
15704         tg3_phy_stop(tp);
15705         tg3_netif_stop(tp);
15706
15707         del_timer_sync(&tp->timer);
15708
15709         tg3_full_lock(tp, 1);
15710         tg3_disable_ints(tp);
15711         tg3_full_unlock(tp);
15712
15713         netif_device_detach(dev);
15714
15715         tg3_full_lock(tp, 0);
15716         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15717         tg3_flag_clear(tp, INIT_COMPLETE);
15718         tg3_full_unlock(tp);
15719
15720         err = tg3_power_down_prepare(tp);
15721         if (err) {
15722                 int err2;
15723
15724                 tg3_full_lock(tp, 0);
15725
15726                 tg3_flag_set(tp, INIT_COMPLETE);
15727                 err2 = tg3_restart_hw(tp, 1);
15728                 if (err2)
15729                         goto out;
15730
15731                 tp->timer.expires = jiffies + tp->timer_offset;
15732                 add_timer(&tp->timer);
15733
15734                 netif_device_attach(dev);
15735                 tg3_netif_start(tp);
15736
15737 out:
15738                 tg3_full_unlock(tp);
15739
15740                 if (!err2)
15741                         tg3_phy_start(tp);
15742         }
15743
15744         return err;
15745 }
15746
15747 static int tg3_resume(struct device *device)
15748 {
15749         struct pci_dev *pdev = to_pci_dev(device);
15750         struct net_device *dev = pci_get_drvdata(pdev);
15751         struct tg3 *tp = netdev_priv(dev);
15752         int err;
15753
15754         if (!netif_running(dev))
15755                 return 0;
15756
15757         netif_device_attach(dev);
15758
15759         tg3_full_lock(tp, 0);
15760
15761         tg3_flag_set(tp, INIT_COMPLETE);
15762         err = tg3_restart_hw(tp, 1);
15763         if (err)
15764                 goto out;
15765
15766         tp->timer.expires = jiffies + tp->timer_offset;
15767         add_timer(&tp->timer);
15768
15769         tg3_netif_start(tp);
15770
15771 out:
15772         tg3_full_unlock(tp);
15773
15774         if (!err)
15775                 tg3_phy_start(tp);
15776
15777         return err;
15778 }
15779
15780 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15781 #define TG3_PM_OPS (&tg3_pm_ops)
15782
15783 #else
15784
15785 #define TG3_PM_OPS NULL
15786
15787 #endif /* CONFIG_PM_SLEEP */
15788
15789 /**
15790  * tg3_io_error_detected - called when PCI error is detected
15791  * @pdev: Pointer to PCI device
15792  * @state: The current pci connection state
15793  *
15794  * This function is called after a PCI bus error affecting
15795  * this device has been detected.
15796  */
15797 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15798                                               pci_channel_state_t state)
15799 {
15800         struct net_device *netdev = pci_get_drvdata(pdev);
15801         struct tg3 *tp = netdev_priv(netdev);
15802         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15803
15804         netdev_info(netdev, "PCI I/O error detected\n");
15805
15806         rtnl_lock();
15807
15808         if (!netif_running(netdev))
15809                 goto done;
15810
15811         tg3_phy_stop(tp);
15812
15813         tg3_netif_stop(tp);
15814
15815         del_timer_sync(&tp->timer);
15816         tg3_flag_clear(tp, RESTART_TIMER);
15817
15818         /* Want to make sure that the reset task doesn't run */
15819         cancel_work_sync(&tp->reset_task);
15820         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15821         tg3_flag_clear(tp, RESTART_TIMER);
15822
15823         netif_device_detach(netdev);
15824
15825         /* Clean up software state, even if MMIO is blocked */
15826         tg3_full_lock(tp, 0);
15827         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15828         tg3_full_unlock(tp);
15829
15830 done:
15831         if (state == pci_channel_io_perm_failure)
15832                 err = PCI_ERS_RESULT_DISCONNECT;
15833         else
15834                 pci_disable_device(pdev);
15835
15836         rtnl_unlock();
15837
15838         return err;
15839 }
15840
15841 /**
15842  * tg3_io_slot_reset - called after the pci bus has been reset.
15843  * @pdev: Pointer to PCI device
15844  *
15845  * Restart the card from scratch, as if from a cold-boot.
15846  * At this point, the card has experienced a hard reset,
15847  * followed by fixups by BIOS, and has its config space
15848  * set up identically to what it was at cold boot.
15849  */
15850 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15851 {
15852         struct net_device *netdev = pci_get_drvdata(pdev);
15853         struct tg3 *tp = netdev_priv(netdev);
15854         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15855         int err;
15856
15857         rtnl_lock();
15858
15859         if (pci_enable_device(pdev)) {
15860                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15861                 goto done;
15862         }
15863
15864         pci_set_master(pdev);
15865         pci_restore_state(pdev);
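              /* Re-save config space so a fresh saved copy is available
               * for any later error-recovery pass.
               */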
15866         pci_save_state(pdev);
15867
15868         if (!netif_running(netdev)) {
15869                 rc = PCI_ERS_RESULT_RECOVERED;
15870                 goto done;
15871         }
15872
15873         err = tg3_power_up(tp);
15874         if (err)
15875                 goto done;
15876
15877         rc = PCI_ERS_RESULT_RECOVERED;
15878
15879 done:
15880         rtnl_unlock();
15881
15882         return rc;
15883 }
15884
15885 /**
15886  * tg3_io_resume - called when traffic can start flowing again.
15887  * @pdev: Pointer to PCI device
15888  *
15889  * This callback is called when the error recovery driver tells
15890  * us that it's OK to resume normal operation.
15891  */
15892 static void tg3_io_resume(struct pci_dev *pdev)
15893 {
15894         struct net_device *netdev = pci_get_drvdata(pdev);
15895         struct tg3 *tp = netdev_priv(netdev);
15896         int err;
15897
15898         rtnl_lock();
15899
15900         if (!netif_running(netdev))
15901                 goto done;
15902
15903         tg3_full_lock(tp, 0);
15904         tg3_flag_set(tp, INIT_COMPLETE);
15905         err = tg3_restart_hw(tp, 1);
15906         tg3_full_unlock(tp);
15907         if (err) {
15908                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15909                 goto done;
15910         }
15911
15912         netif_device_attach(netdev);
15913
15914         tp->timer.expires = jiffies + tp->timer_offset;
15915         add_timer(&tp->timer);
15916
15917         tg3_netif_start(tp);
15918
15919         tg3_phy_start(tp);
15920
15921 done:
15922         rtnl_unlock();
15923 }
15924
15925 static struct pci_error_handlers tg3_err_handler = {
15926         .error_detected = tg3_io_error_detected,
15927         .slot_reset     = tg3_io_slot_reset,
15928         .resume         = tg3_io_resume
15929 };
15930
15931 static struct pci_driver tg3_driver = {
15932         .name           = DRV_MODULE_NAME,
15933         .id_table       = tg3_pci_tbl,
15934         .probe          = tg3_init_one,
15935         .remove         = __devexit_p(tg3_remove_one),
15936         .err_handler    = &tg3_err_handler,
15937         .driver.pm      = TG3_PM_OPS,
15938 };
15939
15940 static int __init tg3_init(void)
15941 {
15942         return pci_register_driver(&tg3_driver);
15943 }
15944
15945 static void __exit tg3_cleanup(void)
15946 {
15947         pci_unregister_driver(&tg3_driver);
15948 }
15949
15950 module_init(tg3_init);
15951 module_exit(tg3_cleanup);