/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
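/* Example: tg3_flag(tp, JUMBO_CAPABLE) expands to
 * _tg3_flag(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags), i.e. a plain
 * test_bit() against the flag bitmap in struct tg3.
 */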

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     119
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 18, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo and
 * related operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
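/* Example of the shift/mask point above: TG3_TX_RING_SIZE is 512, a power
 * of two, so NEXT_TX(511) == (512 & 511) == 0; the '&' wraps the index
 * exactly as '% TG3_TX_RING_SIZE' would, without a hw modulo.
 */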

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 into the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
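/* Illustration: on an architecture with cheap unaligned accesses (e.g. x86,
 * which selects CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), the threshold is
 * the compile-time constant 256; otherwise it is read from
 * tp->rx_copy_thresh so the 5701 workaround can adjust it at runtime.
 */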

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
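/* Example: assuming the default tx_pending of TG3_DEF_TX_RING_PENDING
 * (511, defined above), the queue is woken once at least
 * 511 / 4 = 127 descriptors are free.
 */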
#define TG3_TX_BD_DMA_MAX               4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power;
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
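/* Usage sketch: tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40) writes the
 * register and guarantees at least a 40 usec settle time; on the posted
 * path it delays, reads the register back to flush the write, then delays
 * again.  tg3_switch_clocks() below uses exactly this pattern.
 */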

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver does not hold any stale locks. */
        for (i = 0; i < 8; i++) {
                if (i == TG3_APE_LOCK_GPIO)
                        continue;
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
        }

        /* Clear the correct bit of the GPIO lock too. */
        if (!tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_REQ_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

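/* Worst case, tg3_readphy()/tg3_writephy() below poll MI_COM every 10 usec
 * for PHY_BUSY_LOOPS iterations, i.e. roughly 50 ms, before giving up
 * with -EBUSY.
 */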
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fall through */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

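        /* Worked example: each loop iteration below waits 8 usec, and
         * ((usecs >> 3) + 1) * 8 >= usecs, so the full remaining wait
         * window is always covered.
         */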
        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}

1474 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1475 {
1476         u8 autoneg;
1477         u8 flowctrl = 0;
1478         u32 old_rx_mode = tp->rx_mode;
1479         u32 old_tx_mode = tp->tx_mode;
1480
1481         if (tg3_flag(tp, USE_PHYLIB))
1482                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1483         else
1484                 autoneg = tp->link_config.autoneg;
1485
1486         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1487                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1488                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1489                 else
1490                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1491         } else
1492                 flowctrl = tp->link_config.flowctrl;
1493
1494         tp->link_config.active_flowctrl = flowctrl;
1495
1496         if (flowctrl & FLOW_CTRL_RX)
1497                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1498         else
1499                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1500
1501         if (old_rx_mode != tp->rx_mode)
1502                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1503
1504         if (flowctrl & FLOW_CTRL_TX)
1505                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1506         else
1507                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1508
1509         if (old_tx_mode != tp->tx_mode)
1510                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1511 }
1512
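     /* phylib link-change callback.  Mirrors the PHY's current speed,
      * duplex, and pause state into the MAC's mode and timing registers
      * and emits a link report whenever anything changed.
      */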
1513 static void tg3_adjust_link(struct net_device *dev)
1514 {
1515         u8 oldflowctrl, linkmesg = 0;
1516         u32 mac_mode, lcl_adv, rmt_adv;
1517         struct tg3 *tp = netdev_priv(dev);
1518         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1519
1520         spin_lock_bh(&tp->lock);
1521
1522         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1523                                     MAC_MODE_HALF_DUPLEX);
1524
1525         oldflowctrl = tp->link_config.active_flowctrl;
1526
1527         if (phydev->link) {
1528                 lcl_adv = 0;
1529                 rmt_adv = 0;
1530
1531                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1532                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1533                 else if (phydev->speed == SPEED_1000 ||
1534                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1535                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1536                 else
1537                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1538
1539                 if (phydev->duplex == DUPLEX_HALF)
1540                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1541                 else {
1542                         lcl_adv = tg3_advert_flowctrl_1000T(
1543                                   tp->link_config.flowctrl);
1544
1545                         if (phydev->pause)
1546                                 rmt_adv = LPA_PAUSE_CAP;
1547                         if (phydev->asym_pause)
1548                                 rmt_adv |= LPA_PAUSE_ASYM;
1549                 }
1550
1551                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1552         } else
1553                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1554
1555         if (mac_mode != tp->mac_mode) {
1556                 tp->mac_mode = mac_mode;
1557                 tw32_f(MAC_MODE, tp->mac_mode);
1558                 udelay(40);
1559         }
1560
1561         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1562                 if (phydev->speed == SPEED_10)
1563                         tw32(MAC_MI_STAT,
1564                              MAC_MI_STAT_10MBPS_MODE |
1565                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1566                 else
1567                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1568         }
1569
1570         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1571                 tw32(MAC_TX_LENGTHS,
1572                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1573                       (6 << TX_LENGTHS_IPG_SHIFT) |
1574                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1575         else
1576                 tw32(MAC_TX_LENGTHS,
1577                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1578                       (6 << TX_LENGTHS_IPG_SHIFT) |
1579                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1580
1581         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1582             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1583             phydev->speed != tp->link_config.active_speed ||
1584             phydev->duplex != tp->link_config.active_duplex ||
1585             oldflowctrl != tp->link_config.active_flowctrl)
1586                 linkmesg = 1;
1587
1588         tp->link_config.active_speed = phydev->speed;
1589         tp->link_config.active_duplex = phydev->duplex;
1590
1591         spin_unlock_bh(&tp->lock);
1592
1593         if (linkmesg)
1594                 tg3_link_report(tp);
1595 }
1596
1597 static int tg3_phy_init(struct tg3 *tp)
1598 {
1599         struct phy_device *phydev;
1600
1601         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1602                 return 0;
1603
1604         /* Bring the PHY back to a known state. */
1605         tg3_bmcr_reset(tp);
1606
1607         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1608
1609         /* Attach the MAC to the PHY. */
1610         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1611                              phydev->dev_flags, phydev->interface);
1612         if (IS_ERR(phydev)) {
1613                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1614                 return PTR_ERR(phydev);
1615         }
1616
1617         /* Mask with MAC supported features. */
1618         switch (phydev->interface) {
1619         case PHY_INTERFACE_MODE_GMII:
1620         case PHY_INTERFACE_MODE_RGMII:
1621                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1622                         phydev->supported &= (PHY_GBIT_FEATURES |
1623                                               SUPPORTED_Pause |
1624                                               SUPPORTED_Asym_Pause);
1625                         break;
1626                 }
1627                 /* fallthru */
1628         case PHY_INTERFACE_MODE_MII:
1629                 phydev->supported &= (PHY_BASIC_FEATURES |
1630                                       SUPPORTED_Pause |
1631                                       SUPPORTED_Asym_Pause);
1632                 break;
1633         default:
1634                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1635                 return -EINVAL;
1636         }
1637
1638         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1639
1640         phydev->advertising = phydev->supported;
1641
1642         return 0;
1643 }
1644
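     /* Resume the PHY via phylib, restoring the link settings that
      * tg3_power_down_prepare() saved if the device was in low power.
      */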
1645 static void tg3_phy_start(struct tg3 *tp)
1646 {
1647         struct phy_device *phydev;
1648
1649         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1650                 return;
1651
1652         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1653
1654         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1655                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1656                 phydev->speed = tp->link_config.orig_speed;
1657                 phydev->duplex = tp->link_config.orig_duplex;
1658                 phydev->autoneg = tp->link_config.orig_autoneg;
1659                 phydev->advertising = tp->link_config.orig_advertising;
1660         }
1661
1662         phy_start(phydev);
1663
1664         phy_start_aneg(phydev);
1665 }
1666
1667 static void tg3_phy_stop(struct tg3 *tp)
1668 {
1669         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1670                 return;
1671
1672         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1673 }
1674
1675 static void tg3_phy_fini(struct tg3 *tp)
1676 {
1677         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1678                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1679                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1680         }
1681 }
1682
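     /* FET-style PHYs hide auxiliary registers behind a shadow-enable
      * bit in MII_TG3_FET_TEST: open the shadow space, flip the auto
      * power-down (APD) bit, then restore the test register.
      */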
1683 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1684 {
1685         u32 phytest;
1686
1687         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1688                 u32 phy;
1689
1690                 tg3_writephy(tp, MII_TG3_FET_TEST,
1691                              phytest | MII_TG3_FET_SHADOW_EN);
1692                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1693                         if (enable)
1694                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1695                         else
1696                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1697                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1698                 }
1699                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1700         }
1701 }
1702
1703 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1704 {
1705         u32 reg;
1706
1707         if (!tg3_flag(tp, 5705_PLUS) ||
1708             (tg3_flag(tp, 5717_PLUS) &&
1709              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1710                 return;
1711
1712         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1713                 tg3_phy_fet_toggle_apd(tp, enable);
1714                 return;
1715         }
1716
1717         reg = MII_TG3_MISC_SHDW_WREN |
1718               MII_TG3_MISC_SHDW_SCR5_SEL |
1719               MII_TG3_MISC_SHDW_SCR5_LPED |
1720               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1721               MII_TG3_MISC_SHDW_SCR5_SDTL |
1722               MII_TG3_MISC_SHDW_SCR5_C125OE;
1723         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1724                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1725
1726         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1727
1729         reg = MII_TG3_MISC_SHDW_WREN |
1730               MII_TG3_MISC_SHDW_APD_SEL |
1731               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1732         if (enable)
1733                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1734
1735         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1736 }
1737
1738 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1739 {
1740         u32 phy;
1741
1742         if (!tg3_flag(tp, 5705_PLUS) ||
1743             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1744                 return;
1745
1746         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1747                 u32 ephy;
1748
1749                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1750                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1751
1752                         tg3_writephy(tp, MII_TG3_FET_TEST,
1753                                      ephy | MII_TG3_FET_SHADOW_EN);
1754                         if (!tg3_readphy(tp, reg, &phy)) {
1755                                 if (enable)
1756                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1757                                 else
1758                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1759                                 tg3_writephy(tp, reg, phy);
1760                         }
1761                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1762                 }
1763         } else {
1764                 int ret;
1765
1766                 ret = tg3_phy_auxctl_read(tp,
1767                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1768                 if (!ret) {
1769                         if (enable)
1770                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1771                         else
1772                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1773                         tg3_phy_auxctl_write(tp,
1774                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1775                 }
1776         }
1777 }
1778
1779 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1780 {
1781         int ret;
1782         u32 val;
1783
1784         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1785                 return;
1786
1787         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1788         if (!ret)
1789                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1790                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1791 }
1792
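     /* Copy factory calibration values from the chip's one-time
      * programmable (OTP) area into the corresponding PHY DSP taps.
      */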
1793 static void tg3_phy_apply_otp(struct tg3 *tp)
1794 {
1795         u32 otp, phy;
1796
1797         if (!tp->phy_otp)
1798                 return;
1799
1800         otp = tp->phy_otp;
1801
1802         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1803                 return;
1804
1805         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1806         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1807         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1808
1809         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1810               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1811         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1812
1813         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1814         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1815         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1816
1817         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1818         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1819
1820         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1821         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1822
1823         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1824               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1825         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1826
1827         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1828 }
1829
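     /* Re-evaluate Energy Efficient Ethernet after a link change: set
      * the LPI exit timer for the new speed and, if the link partner
      * also resolved EEE, start the setlpicnt countdown that gates
      * tg3_phy_eee_enable(); otherwise LPI is switched off outright.
      */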
1830 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1831 {
1832         u32 val;
1833
1834         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1835                 return;
1836
1837         tp->setlpicnt = 0;
1838
1839         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1840             current_link_up == 1 &&
1841             tp->link_config.active_duplex == DUPLEX_FULL &&
1842             (tp->link_config.active_speed == SPEED_100 ||
1843              tp->link_config.active_speed == SPEED_1000)) {
1844                 u32 eeectl;
1845
1846                 if (tp->link_config.active_speed == SPEED_1000)
1847                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1848                 else
1849                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1850
1851                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1852
1853                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1854                                   TG3_CL45_D7_EEERES_STAT, &val);
1855
1856                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1857                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1858                         tp->setlpicnt = 2;
1859         }
1860
1861         if (!tp->setlpicnt) {
1862                 if (current_link_up == 1 &&
1863                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1864                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1865                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1866                 }
1867
1868                 val = tr32(TG3_CPMU_EEE_MODE);
1869                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1870         }
1871 }
1872
1873 static void tg3_phy_eee_enable(struct tg3 *tp)
1874 {
1875         u32 val;
1876
1877         if (tp->link_config.active_speed == SPEED_1000 &&
1878             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1879              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1880              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1881             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1882                 val = MII_TG3_DSP_TAP26_ALNOKO |
1883                       MII_TG3_DSP_TAP26_RMRXSTO;
1884                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
1885                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1886         }
1887
1888         val = tr32(TG3_CPMU_EEE_MODE);
1889         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1890 }
1891
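     /* Poll the DSP control register until the macro busy bit (0x1000)
      * clears, giving up after 100 reads.
      */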
1892 static int tg3_wait_macro_done(struct tg3 *tp)
1893 {
1894         int limit = 100;
1895
1896         while (limit--) {
1897                 u32 tmp32;
1898
1899                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1900                         if ((tmp32 & 0x1000) == 0)
1901                                 break;
1902                 }
1903         }
1904         if (limit < 0)
1905                 return -EBUSY;
1906
1907         return 0;
1908 }
1909
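     /* Write a known test pattern into each of the four DSP channels
      * and read it back.  Any mismatch or macro timeout flags the PHY
      * for another reset attempt.
      */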
1910 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1911 {
1912         static const u32 test_pat[4][6] = {
1913         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1914         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1915         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1916         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1917         };
1918         int chan;
1919
1920         for (chan = 0; chan < 4; chan++) {
1921                 int i;
1922
1923                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1924                              (chan * 0x2000) | 0x0200);
1925                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1926
1927                 for (i = 0; i < 6; i++)
1928                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1929                                      test_pat[chan][i]);
1930
1931                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1932                 if (tg3_wait_macro_done(tp)) {
1933                         *resetp = 1;
1934                         return -EBUSY;
1935                 }
1936
1937                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1938                              (chan * 0x2000) | 0x0200);
1939                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1940                 if (tg3_wait_macro_done(tp)) {
1941                         *resetp = 1;
1942                         return -EBUSY;
1943                 }
1944
1945                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1946                 if (tg3_wait_macro_done(tp)) {
1947                         *resetp = 1;
1948                         return -EBUSY;
1949                 }
1950
1951                 for (i = 0; i < 6; i += 2) {
1952                         u32 low, high;
1953
1954                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1955                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1956                             tg3_wait_macro_done(tp)) {
1957                                 *resetp = 1;
1958                                 return -EBUSY;
1959                         }
1960                         low &= 0x7fff;
1961                         high &= 0x000f;
1962                         if (low != test_pat[chan][i] ||
1963                             high != test_pat[chan][i+1]) {
1964                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1965                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1966                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1967
1968                                 return -EBUSY;
1969                         }
1970                 }
1971         }
1972
1973         return 0;
1974 }
1975
1976 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1977 {
1978         int chan;
1979
1980         for (chan = 0; chan < 4; chan++) {
1981                 int i;
1982
1983                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1984                              (chan * 0x2000) | 0x0200);
1985                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1986                 for (i = 0; i < 6; i++)
1987                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1988                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1989                 if (tg3_wait_macro_done(tp))
1990                         return -EBUSY;
1991         }
1992
1993         return 0;
1994 }
1995
1996 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1997 {
1998         u32 reg32, phy9_orig;
1999         int retries, do_phy_reset, err;
2000
2001         retries = 10;
2002         do_phy_reset = 1;
2003         do {
2004                 if (do_phy_reset) {
2005                         err = tg3_bmcr_reset(tp);
2006                         if (err)
2007                                 return err;
2008                         do_phy_reset = 0;
2009                 }
2010
2011                 /* Disable transmitter and interrupt.  */
2012                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2013                         continue;
2014
2015                 reg32 |= 0x3000;
2016                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2017
2018                 /* Set full-duplex, 1000 Mbps.  */
2019                 tg3_writephy(tp, MII_BMCR,
2020                              BMCR_FULLDPLX | BMCR_SPEED1000);
2021
2022                 /* Set to master mode.  */
2023                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2024                         continue;
2025
2026                 tg3_writephy(tp, MII_CTRL1000,
2027                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2028
2029                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2030                 if (err)
2031                         return err;
2032
2033                 /* Block the PHY control access.  */
2034                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2035
2036                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2037                 if (!err)
2038                         break;
2039         } while (--retries);
2040
2041         err = tg3_phy_reset_chanpat(tp);
2042         if (err)
2043                 return err;
2044
2045         tg3_phydsp_write(tp, 0x8005, 0x0000);
2046
2047         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2048         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2049
2050         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2051
2052         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2053
2054         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2055                 reg32 &= ~0x3000;
2056                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2057         } else if (!err)
2058                 err = -EBUSY;
2059
2060         return err;
2061 }
2062
2063 /* This resets the tigon3 PHY and reapplies any chip- and
2064  * PHY-specific workarounds that a reset wipes out.
2065  */
2066 static int tg3_phy_reset(struct tg3 *tp)
2067 {
2068         u32 val, cpmuctrl;
2069         int err;
2070
2071         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2072                 val = tr32(GRC_MISC_CFG);
2073                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2074                 udelay(40);
2075         }
2076         err  = tg3_readphy(tp, MII_BMSR, &val);
2077         err |= tg3_readphy(tp, MII_BMSR, &val);
2078         if (err != 0)
2079                 return -EBUSY;
2080
2081         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2082                 netif_carrier_off(tp->dev);
2083                 tg3_link_report(tp);
2084         }
2085
2086         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2087             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2088             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2089                 err = tg3_phy_reset_5703_4_5(tp);
2090                 if (err)
2091                         return err;
2092                 goto out;
2093         }
2094
2095         cpmuctrl = 0;
2096         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2097             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2098                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2099                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2100                         tw32(TG3_CPMU_CTRL,
2101                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2102         }
2103
2104         err = tg3_bmcr_reset(tp);
2105         if (err)
2106                 return err;
2107
2108         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2109                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2110                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2111
2112                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2113         }
2114
2115         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2116             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2117                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2118                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2119                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2120                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2121                         udelay(40);
2122                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2123                 }
2124         }
2125
2126         if (tg3_flag(tp, 5717_PLUS) &&
2127             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2128                 return 0;
2129
2130         tg3_phy_apply_otp(tp);
2131
2132         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2133                 tg3_phy_toggle_apd(tp, true);
2134         else
2135                 tg3_phy_toggle_apd(tp, false);
2136
2137 out:
2138         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2139             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2140                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2141                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2142                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2143         }
2144
2145         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2146                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2147                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2148         }
2149
2150         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2151                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2152                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2153                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2154                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2155                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2156                 }
2157         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2158                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2159                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2160                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2161                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2162                                 tg3_writephy(tp, MII_TG3_TEST1,
2163                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2164                         } else
2165                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2166
2167                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2168                 }
2169         }
2170
2171         /* Set the extended packet length bit (bit 14) on all
2172          * chips that support jumbo frames. */
2173         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2174                 /* Cannot do read-modify-write on 5401 */
2175                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2176         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2177                 /* Set bit 14 with read-modify-write to preserve other bits */
2178                 err = tg3_phy_auxctl_read(tp,
2179                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2180                 if (!err)
2181                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2182                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2183         }
2184
2185         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2186          * jumbo frame transmission.
2187          */
2188         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2189                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2190                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2191                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2192         }
2193
2194         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2195                 /* adjust output voltage */
2196                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2197         }
2198
2199         tg3_phy_toggle_automdix(tp, 1);
2200         tg3_phy_set_wirespeed(tp);
2201         return 0;
2202 }
2203
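     /* GPIO power-switch handshake: each PCI function owns a 4-bit
      * field in a shared status word (the APE GPIO message register on
      * 5717/5719, TG3_CPMU_DRV_STATUS otherwise) announcing that its
      * driver is present and whether it still needs auxiliary power.
      */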
2204 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2205 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2206 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2207                                           TG3_GPIO_MSG_NEED_VAUX)
2208 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2209         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2210          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2211          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2212          (TG3_GPIO_MSG_DRVR_PRES << 12))
2213
2214 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2215         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2216          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2217          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2218          (TG3_GPIO_MSG_NEED_VAUX << 12))
2219
2220 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2221 {
2222         u32 status, shift;
2223
2224         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2225             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2226                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2227         else
2228                 status = tr32(TG3_CPMU_DRV_STATUS);
2229
2230         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2231         status &= ~(TG3_GPIO_MSG_MASK << shift);
2232         status |= (newstat << shift);
2233
2234         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2235             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2236                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2237         else
2238                 tw32(TG3_CPMU_DRV_STATUS, status);
2239
2240         return status >> TG3_APE_GPIO_MSG_SHIFT;
2241 }
2242
2243 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2244 {
2245         if (!tg3_flag(tp, IS_NIC))
2246                 return 0;
2247
2248         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2249             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2250             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2251                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2252                         return -EIO;
2253
2254                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2255
2256                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2257                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2258
2259                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2260         } else {
2261                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2262                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2263         }
2264
2265         return 0;
2266 }
2267
2268 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2269 {
2270         u32 grc_local_ctrl;
2271
2272         if (!tg3_flag(tp, IS_NIC) ||
2273             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2274             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2275                 return;
2276
2277         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2278
2279         tw32_wait_f(GRC_LOCAL_CTRL,
2280                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2281                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2282
2283         tw32_wait_f(GRC_LOCAL_CTRL,
2284                     grc_local_ctrl,
2285                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2286
2287         tw32_wait_f(GRC_LOCAL_CTRL,
2288                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2289                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2290 }
2291
2292 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2293 {
2294         if (!tg3_flag(tp, IS_NIC))
2295                 return;
2296
2297         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2298             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2299                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2300                             (GRC_LCLCTRL_GPIO_OE0 |
2301                              GRC_LCLCTRL_GPIO_OE1 |
2302                              GRC_LCLCTRL_GPIO_OE2 |
2303                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2304                              GRC_LCLCTRL_GPIO_OUTPUT1),
2305                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2306         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2307                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2308                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2309                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2310                                      GRC_LCLCTRL_GPIO_OE1 |
2311                                      GRC_LCLCTRL_GPIO_OE2 |
2312                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2313                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2314                                      tp->grc_local_ctrl;
2315                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2316                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2317
2318                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2319                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2320                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2321
2322                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2323                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2324                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2325         } else {
2326                 u32 no_gpio2;
2327                 u32 grc_local_ctrl = 0;
2328
2329                 /* Workaround to prevent drawing too much current. */
2330                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2331                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2332                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2333                                     grc_local_ctrl,
2334                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2335                 }
2336
2337                 /* On 5753 and variants, GPIO2 cannot be used. */
2338                 no_gpio2 = tp->nic_sram_data_cfg &
2339                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2340
2341                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2342                                   GRC_LCLCTRL_GPIO_OE1 |
2343                                   GRC_LCLCTRL_GPIO_OE2 |
2344                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2345                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2346                 if (no_gpio2) {
2347                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2348                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2349                 }
2350                 tw32_wait_f(GRC_LOCAL_CTRL,
2351                             tp->grc_local_ctrl | grc_local_ctrl,
2352                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2353
2354                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2355
2356                 tw32_wait_f(GRC_LOCAL_CTRL,
2357                             tp->grc_local_ctrl | grc_local_ctrl,
2358                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2359
2360                 if (!no_gpio2) {
2361                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2362                         tw32_wait_f(GRC_LOCAL_CTRL,
2363                                     tp->grc_local_ctrl | grc_local_ctrl,
2364                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2365                 }
2366         }
2367 }
2368
2369 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2370 {
2371         u32 msg = 0;
2372
2373         /* Serialize power state transitions */
2374         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2375                 return;
2376
2377         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2378                 msg = TG3_GPIO_MSG_NEED_VAUX;
2379
2380         msg = tg3_set_function_status(tp, msg);
2381
2382         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2383                 goto done;
2384
2385         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2386                 tg3_pwrsrc_switch_to_vaux(tp);
2387         else
2388                 tg3_pwrsrc_die_with_vmain(tp);
2389
2390 done:
2391         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2392 }
2393
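     /* Decide whether this function (and, on two-port devices, its
      * peer) still needs auxiliary power for WOL or ASF, then switch
      * the board's power source accordingly.
      */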
2394 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2395 {
2396         bool need_vaux = false;
2397
2398         /* The GPIOs do something completely different on 57765. */
2399         if (!tg3_flag(tp, IS_NIC) ||
2400             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2401                 return;
2402
2403         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2404             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2405             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2406                 tg3_frob_aux_power_5717(tp, include_wol ?
2407                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2408                 return;
2409         }
2410
2411         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2412                 struct net_device *dev_peer;
2413
2414                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2415
2416                 /* remove_one() may have been run on the peer. */
2417                 if (dev_peer) {
2418                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2419
2420                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2421                                 return;
2422
2423                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2424                             tg3_flag(tp_peer, ENABLE_ASF))
2425                                 need_vaux = true;
2426                 }
2427         }
2428
2429         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2430             tg3_flag(tp, ENABLE_ASF))
2431                 need_vaux = true;
2432
2433         if (need_vaux)
2434                 tg3_pwrsrc_switch_to_vaux(tp);
2435         else
2436                 tg3_pwrsrc_die_with_vmain(tp);
2437 }
2438
2439 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2440 {
2441         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2442                 return 1;
2443         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2444                 if (speed != SPEED_10)
2445                         return 1;
2446         } else if (speed == SPEED_10)
2447                 return 1;
2448
2449         return 0;
2450 }
2451
2452 static int tg3_setup_phy(struct tg3 *, int);
2453
2454 #define RESET_KIND_SHUTDOWN     0
2455 #define RESET_KIND_INIT         1
2456 #define RESET_KIND_SUSPEND      2
2457
2458 static void tg3_write_sig_post_reset(struct tg3 *, int);
2459 static int tg3_halt_cpu(struct tg3 *, u32);
2460
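     /* Put the PHY into its lowest safe power state.  Several chip
      * revisions must not have the PHY fully powered down, so those
      * return before the final BMCR_PDOWN write.
      */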
2461 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2462 {
2463         u32 val;
2464
2465         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2466                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2467                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2468                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2469
2470                         sg_dig_ctrl |=
2471                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2472                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2473                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2474                 }
2475                 return;
2476         }
2477
2478         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2479                 tg3_bmcr_reset(tp);
2480                 val = tr32(GRC_MISC_CFG);
2481                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2482                 udelay(40);
2483                 return;
2484         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2485                 u32 phytest;
2486                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2487                         u32 phy;
2488
2489                         tg3_writephy(tp, MII_ADVERTISE, 0);
2490                         tg3_writephy(tp, MII_BMCR,
2491                                      BMCR_ANENABLE | BMCR_ANRESTART);
2492
2493                         tg3_writephy(tp, MII_TG3_FET_TEST,
2494                                      phytest | MII_TG3_FET_SHADOW_EN);
2495                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2496                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2497                                 tg3_writephy(tp,
2498                                              MII_TG3_FET_SHDW_AUXMODE4,
2499                                              phy);
2500                         }
2501                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2502                 }
2503                 return;
2504         } else if (do_low_power) {
2505                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2506                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2507
2508                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2509                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2510                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2511                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2512         }
2513
2514         /* The PHY should not be powered down on some chips because
2515          * of bugs.
2516          */
2517         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2518             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2519             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2520              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2521                 return;
2522
2523         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2524             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2525                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2526                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2527                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2528                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2529         }
2530
2531         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2532 }
2533
2534 /* tp->lock is held. */
2535 static int tg3_nvram_lock(struct tg3 *tp)
2536 {
2537         if (tg3_flag(tp, NVRAM)) {
2538                 int i;
2539
2540                 if (tp->nvram_lock_cnt == 0) {
2541                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2542                         for (i = 0; i < 8000; i++) {
2543                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2544                                         break;
2545                                 udelay(20);
2546                         }
2547                         if (i == 8000) {
2548                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2549                                 return -ENODEV;
2550                         }
2551                 }
2552                 tp->nvram_lock_cnt++;
2553         }
2554         return 0;
2555 }
2556
2557 /* tp->lock is held. */
2558 static void tg3_nvram_unlock(struct tg3 *tp)
2559 {
2560         if (tg3_flag(tp, NVRAM)) {
2561                 if (tp->nvram_lock_cnt > 0)
2562                         tp->nvram_lock_cnt--;
2563                 if (tp->nvram_lock_cnt == 0)
2564                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2565         }
2566 }
2567
2568 /* tp->lock is held. */
2569 static void tg3_enable_nvram_access(struct tg3 *tp)
2570 {
2571         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2572                 u32 nvaccess = tr32(NVRAM_ACCESS);
2573
2574                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2575         }
2576 }
2577
2578 /* tp->lock is held. */
2579 static void tg3_disable_nvram_access(struct tg3 *tp)
2580 {
2581         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2582                 u32 nvaccess = tr32(NVRAM_ACCESS);
2583
2584                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2585         }
2586 }
2587
2588 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2589                                         u32 offset, u32 *val)
2590 {
2591         u32 tmp;
2592         int i;
2593
2594         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2595                 return -EINVAL;
2596
2597         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2598                                         EEPROM_ADDR_DEVID_MASK |
2599                                         EEPROM_ADDR_READ);
2600         tw32(GRC_EEPROM_ADDR,
2601              tmp |
2602              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2603              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2604               EEPROM_ADDR_ADDR_MASK) |
2605              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2606
2607         for (i = 0; i < 1000; i++) {
2608                 tmp = tr32(GRC_EEPROM_ADDR);
2609
2610                 if (tmp & EEPROM_ADDR_COMPLETE)
2611                         break;
2612                 msleep(1);
2613         }
2614         if (!(tmp & EEPROM_ADDR_COMPLETE))
2615                 return -EBUSY;
2616
2617         tmp = tr32(GRC_EEPROM_DATA);
2618
2619         /*
2620          * The data will always be opposite the native endian
2621          * format.  Perform a blind byteswap to compensate.
2622          */
2623         *val = swab32(tmp);
2624
2625         return 0;
2626 }
2627
2628 #define NVRAM_CMD_TIMEOUT 10000
2629
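     /* Issue an NVRAM command and busy-wait for the DONE bit, polling
      * in 10 usec steps for up to ~100 msec.
      */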
2630 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2631 {
2632         int i;
2633
2634         tw32(NVRAM_CMD, nvram_cmd);
2635         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2636                 udelay(10);
2637                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2638                         udelay(10);
2639                         break;
2640                 }
2641         }
2642
2643         if (i == NVRAM_CMD_TIMEOUT)
2644                 return -EBUSY;
2645
2646         return 0;
2647 }
2648
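     /* Certain buffered Atmel flash parts are addressed by page number
      * plus intra-page offset rather than by a flat byte address.  The
      * next two helpers convert a linear offset to that form and back
      * when the controller does not translate addresses itself.
      */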
2649 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2650 {
2651         if (tg3_flag(tp, NVRAM) &&
2652             tg3_flag(tp, NVRAM_BUFFERED) &&
2653             tg3_flag(tp, FLASH) &&
2654             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2655             (tp->nvram_jedecnum == JEDEC_ATMEL))
2656
2657                 addr = ((addr / tp->nvram_pagesize) <<
2658                         ATMEL_AT45DB0X1B_PAGE_POS) +
2659                        (addr % tp->nvram_pagesize);
2660
2661         return addr;
2662 }
2663
2664 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2665 {
2666         if (tg3_flag(tp, NVRAM) &&
2667             tg3_flag(tp, NVRAM_BUFFERED) &&
2668             tg3_flag(tp, FLASH) &&
2669             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2670             (tp->nvram_jedecnum == JEDEC_ATMEL))
2671
2672                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2673                         tp->nvram_pagesize) +
2674                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2675
2676         return addr;
2677 }
2678
2679 /* NOTE: Data read in from NVRAM is byteswapped according to
2680  * the byteswapping settings for all other register accesses.
2681  * tg3 devices are BE devices, so on a BE machine, the data
2682  * returned will be exactly as it is seen in NVRAM.  On a LE
2683  * machine, the 32-bit value will be byteswapped.
2684  */
2685 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2686 {
2687         int ret;
2688
2689         if (!tg3_flag(tp, NVRAM))
2690                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2691
2692         offset = tg3_nvram_phys_addr(tp, offset);
2693
2694         if (offset > NVRAM_ADDR_MSK)
2695                 return -EINVAL;
2696
2697         ret = tg3_nvram_lock(tp);
2698         if (ret)
2699                 return ret;
2700
2701         tg3_enable_nvram_access(tp);
2702
2703         tw32(NVRAM_ADDR, offset);
2704         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2705                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2706
2707         if (ret == 0)
2708                 *val = tr32(NVRAM_RDDATA);
2709
2710         tg3_disable_nvram_access(tp);
2711
2712         tg3_nvram_unlock(tp);
2713
2714         return ret;
2715 }
2716
2717 /* Ensures NVRAM data is in bytestream format. */
2718 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2719 {
2720         u32 v;
2721         int res = tg3_nvram_read(tp, offset, &v);
2722         if (!res)
2723                 *val = cpu_to_be32(v);
2724         return res;
2725 }
2726
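     /* Program the station address into all four MAC address slots
      * (optionally leaving slot 1 for ASF firmware), mirror it into the
      * extended slots on 5703/5704, and reseed the transmit backoff
      * generator from the address bytes.
      */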
2727 /* tp->lock is held. */
2728 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2729 {
2730         u32 addr_high, addr_low;
2731         int i;
2732
2733         addr_high = ((tp->dev->dev_addr[0] << 8) |
2734                      tp->dev->dev_addr[1]);
2735         addr_low = ((tp->dev->dev_addr[2] << 24) |
2736                     (tp->dev->dev_addr[3] << 16) |
2737                     (tp->dev->dev_addr[4] <<  8) |
2738                     (tp->dev->dev_addr[5] <<  0));
2739         for (i = 0; i < 4; i++) {
2740                 if (i == 1 && skip_mac_1)
2741                         continue;
2742                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2743                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2744         }
2745
2746         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2747             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2748                 for (i = 0; i < 12; i++) {
2749                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2750                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2751                 }
2752         }
2753
2754         addr_high = (tp->dev->dev_addr[0] +
2755                      tp->dev->dev_addr[1] +
2756                      tp->dev->dev_addr[2] +
2757                      tp->dev->dev_addr[3] +
2758                      tp->dev->dev_addr[4] +
2759                      tp->dev->dev_addr[5]) &
2760                 TX_BACKOFF_SEED_MASK;
2761         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2762 }
2763
2764 static void tg3_enable_register_access(struct tg3 *tp)
2765 {
2766         /*
2767          * Make sure register accesses (indirect or otherwise) will function
2768          * correctly.
2769          */
2770         pci_write_config_dword(tp->pdev,
2771                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2772 }
2773
2774 static int tg3_power_up(struct tg3 *tp)
2775 {
2776         int err;
2777
2778         tg3_enable_register_access(tp);
2779
2780         err = pci_set_power_state(tp->pdev, PCI_D0);
2781         if (!err) {
2782                 /* Switch out of Vaux if it is a NIC */
2783                 tg3_pwrsrc_switch_to_vmain(tp);
2784         } else {
2785                 netdev_err(tp->dev, "Transition to D0 failed\n");
2786         }
2787
2788         return err;
2789 }
2790
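     /* Quiesce the chip for a system low-power transition: save the
      * current link configuration, drop the link to a WOL-capable
      * speed, and arm the magic-packet wake logic when WOL is enabled.
      */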
2791 static int tg3_power_down_prepare(struct tg3 *tp)
2792 {
2793         u32 misc_host_ctrl;
2794         bool device_should_wake, do_low_power;
2795
2796         tg3_enable_register_access(tp);
2797
2798         /* Restore the CLKREQ setting. */
2799         if (tg3_flag(tp, CLKREQ_BUG)) {
2800                 u16 lnkctl;
2801
2802                 pci_read_config_word(tp->pdev,
2803                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2804                                      &lnkctl);
2805                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2806                 pci_write_config_word(tp->pdev,
2807                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2808                                       lnkctl);
2809         }
2810
2811         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2812         tw32(TG3PCI_MISC_HOST_CTRL,
2813              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2814
2815         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2816                              tg3_flag(tp, WOL_ENABLE);
2817
2818         if (tg3_flag(tp, USE_PHYLIB)) {
2819                 do_low_power = false;
2820                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2821                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2822                         struct phy_device *phydev;
2823                         u32 phyid, advertising;
2824
2825                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2826
2827                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2828
2829                         tp->link_config.orig_speed = phydev->speed;
2830                         tp->link_config.orig_duplex = phydev->duplex;
2831                         tp->link_config.orig_autoneg = phydev->autoneg;
2832                         tp->link_config.orig_advertising = phydev->advertising;
2833
2834                         advertising = ADVERTISED_TP |
2835                                       ADVERTISED_Pause |
2836                                       ADVERTISED_Autoneg |
2837                                       ADVERTISED_10baseT_Half;
2838
2839                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2840                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2841                                         advertising |=
2842                                                 ADVERTISED_100baseT_Half |
2843                                                 ADVERTISED_100baseT_Full |
2844                                                 ADVERTISED_10baseT_Full;
2845                                 else
2846                                         advertising |= ADVERTISED_10baseT_Full;
2847                         }
2848
2849                         phydev->advertising = advertising;
2850
2851                         phy_start_aneg(phydev);
2852
2853                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2854                         if (phyid != PHY_ID_BCMAC131) {
2855                                 phyid &= PHY_BCM_OUI_MASK;
2856                                 if (phyid == PHY_BCM_OUI_1 ||
2857                                     phyid == PHY_BCM_OUI_2 ||
2858                                     phyid == PHY_BCM_OUI_3)
2859                                         do_low_power = true;
2860                         }
2861                 }
2862         } else {
2863                 do_low_power = true;
2864
2865                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2866                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2867                         tp->link_config.orig_speed = tp->link_config.speed;
2868                         tp->link_config.orig_duplex = tp->link_config.duplex;
2869                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2870                 }
2871
2872                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2873                         tp->link_config.speed = SPEED_10;
2874                         tp->link_config.duplex = DUPLEX_HALF;
2875                         tp->link_config.autoneg = AUTONEG_ENABLE;
2876                         tg3_setup_phy(tp, 0);
2877                 }
2878         }
2879
2880         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2881                 u32 val;
2882
2883                 val = tr32(GRC_VCPU_EXT_CTRL);
2884                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2885         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2886                 int i;
2887                 u32 val;
2888
2889                 for (i = 0; i < 200; i++) {
2890                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2891                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2892                                 break;
2893                         msleep(1);
2894                 }
2895         }
2896         if (tg3_flag(tp, WOL_CAP))
2897                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2898                                                      WOL_DRV_STATE_SHUTDOWN |
2899                                                      WOL_DRV_WOL |
2900                                                      WOL_SET_MAGIC_PKT);
2901
2902         if (device_should_wake) {
2903                 u32 mac_mode;
2904
2905                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2906                         if (do_low_power &&
2907                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2908                                 tg3_phy_auxctl_write(tp,
2909                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2910                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2911                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2912                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2913                                 udelay(40);
2914                         }
2915
2916                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2917                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2918                         else
2919                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2920
2921                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2922                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2923                             ASIC_REV_5700) {
2924                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2925                                              SPEED_100 : SPEED_10;
2926                                 if (tg3_5700_link_polarity(tp, speed))
2927                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2928                                 else
2929                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2930                         }
2931                 } else {
2932                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2933                 }
2934
2935                 if (!tg3_flag(tp, 5750_PLUS))
2936                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2937
2938                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2939                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2940                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2941                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2942
2943                 if (tg3_flag(tp, ENABLE_APE))
2944                         mac_mode |= MAC_MODE_APE_TX_EN |
2945                                     MAC_MODE_APE_RX_EN |
2946                                     MAC_MODE_TDE_ENABLE;
2947
2948                 tw32_f(MAC_MODE, mac_mode);
2949                 udelay(100);
2950
2951                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2952                 udelay(10);
2953         }
2954
2955         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2956             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2957              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2958                 u32 base_val;
2959
2960                 base_val = tp->pci_clock_ctrl;
2961                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2962                              CLOCK_CTRL_TXCLK_DISABLE);
2963
2964                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2965                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2966         } else if (tg3_flag(tp, 5780_CLASS) ||
2967                    tg3_flag(tp, CPMU_PRESENT) ||
2968                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2969                 /* do nothing */
2970         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2971                 u32 newbits1, newbits2;
2972
2973                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2974                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2975                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2976                                     CLOCK_CTRL_TXCLK_DISABLE |
2977                                     CLOCK_CTRL_ALTCLK);
2978                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2979                 } else if (tg3_flag(tp, 5705_PLUS)) {
2980                         newbits1 = CLOCK_CTRL_625_CORE;
2981                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2982                 } else {
2983                         newbits1 = CLOCK_CTRL_ALTCLK;
2984                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2985                 }
2986
2987                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2988                             40);
2989
2990                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2991                             40);
2992
2993                 if (!tg3_flag(tp, 5705_PLUS)) {
2994                         u32 newbits3;
2995
2996                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2997                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2998                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2999                                             CLOCK_CTRL_TXCLK_DISABLE |
3000                                             CLOCK_CTRL_44MHZ_CORE);
3001                         } else {
3002                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3003                         }
3004
3005                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3006                                     tp->pci_clock_ctrl | newbits3, 40);
3007                 }
3008         }
3009
3010         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3011                 tg3_power_down_phy(tp, do_low_power);
3012
3013         tg3_frob_aux_power(tp, true);
3014
3015         /* Workaround for unstable PLL clock */
3016         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3017             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3018                 u32 val = tr32(0x7d00);
3019
3020                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3021                 tw32(0x7d00, val);
3022                 if (!tg3_flag(tp, ENABLE_ASF)) {
3023                         int err;
3024
3025                         err = tg3_nvram_lock(tp);
3026                         tg3_halt_cpu(tp, RX_CPU_BASE);
3027                         if (!err)
3028                                 tg3_nvram_unlock(tp);
3029                 }
3030         }
3031
3032         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3033
3034         return 0;
3035 }
3036
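/* Final power-down path: run the shutdown preparation above, arm PCI
 * wakeup if Wake-on-LAN is enabled, then put the device into D3hot.
 */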
3037 static void tg3_power_down(struct tg3 *tp)
3038 {
3039         tg3_power_down_prepare(tp);
3040
3041         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3042         pci_set_power_state(tp->pdev, PCI_D3hot);
3043 }
3044
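/* Decode the speed/duplex field of the PHY aux status register into
 * SPEED_* / DUPLEX_* values.  FET-style PHYs encode the result
 * differently and are handled in the default case; anything else
 * unrecognized reports SPEED_INVALID / DUPLEX_INVALID.
 */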
3045 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3046 {
3047         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3048         case MII_TG3_AUX_STAT_10HALF:
3049                 *speed = SPEED_10;
3050                 *duplex = DUPLEX_HALF;
3051                 break;
3052
3053         case MII_TG3_AUX_STAT_10FULL:
3054                 *speed = SPEED_10;
3055                 *duplex = DUPLEX_FULL;
3056                 break;
3057
3058         case MII_TG3_AUX_STAT_100HALF:
3059                 *speed = SPEED_100;
3060                 *duplex = DUPLEX_HALF;
3061                 break;
3062
3063         case MII_TG3_AUX_STAT_100FULL:
3064                 *speed = SPEED_100;
3065                 *duplex = DUPLEX_FULL;
3066                 break;
3067
3068         case MII_TG3_AUX_STAT_1000HALF:
3069                 *speed = SPEED_1000;
3070                 *duplex = DUPLEX_HALF;
3071                 break;
3072
3073         case MII_TG3_AUX_STAT_1000FULL:
3074                 *speed = SPEED_1000;
3075                 *duplex = DUPLEX_FULL;
3076                 break;
3077
3078         default:
3079                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3080                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3081                                  SPEED_10;
3082                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3083                                   DUPLEX_HALF;
3084                         break;
3085                 }
3086                 *speed = SPEED_INVALID;
3087                 *duplex = DUPLEX_INVALID;
3088                 break;
3089         }
3090 }
3091
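/* Program the MII advertisement registers from an ethtool ADVERTISED_*
 * mask and a FLOW_CTRL_* mask.  Gigabit modes are skipped on
 * 10/100-only PHYs, and EEE advertisement is skipped when the PHY
 * lacks EEE support.  Returns 0 or the first failing PHY access error.
 */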
3092 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3093 {
3094         int err = 0;
3095         u32 val, new_adv;
3096
3097         new_adv = ADVERTISE_CSMA;
3098         if (advertise & ADVERTISED_10baseT_Half)
3099                 new_adv |= ADVERTISE_10HALF;
3100         if (advertise & ADVERTISED_10baseT_Full)
3101                 new_adv |= ADVERTISE_10FULL;
3102         if (advertise & ADVERTISED_100baseT_Half)
3103                 new_adv |= ADVERTISE_100HALF;
3104         if (advertise & ADVERTISED_100baseT_Full)
3105                 new_adv |= ADVERTISE_100FULL;
3106
3107         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3108
3109         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3110         if (err)
3111                 goto done;
3112
3113         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3114                 goto done;
3115
3116         new_adv = 0;
3117         if (advertise & ADVERTISED_1000baseT_Half)
3118                 new_adv |= ADVERTISE_1000HALF;
3119         if (advertise & ADVERTISED_1000baseT_Full)
3120                 new_adv |= ADVERTISE_1000FULL;
3121
3122         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3123             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3124                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3125
3126         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3127         if (err)
3128                 goto done;
3129
3130         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3131                 goto done;
3132
3133         tw32(TG3_CPMU_EEE_MODE,
3134              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3135
3136         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3137         if (!err) {
3138                 int err2;
3139
3140                 val = 0;
3141                 /* Advertise 100BASE-TX EEE ability */
3142                 if (advertise & ADVERTISED_100baseT_Full)
3143                         val |= MDIO_AN_EEE_ADV_100TX;
3144                 /* Advertise 1000BASE-T EEE ability */
3145                 if (advertise & ADVERTISED_1000baseT_Full)
3146                         val |= MDIO_AN_EEE_ADV_1000T;
3147                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3148                 if (err)
3149                         val = 0;
3150
3151                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3152                 case ASIC_REV_5717:
3153                 case ASIC_REV_57765:
3154                 case ASIC_REV_5719:
3155                         /* If we advertised any EEE abilities above... */
3156                         if (val)
3157                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3158                                       MII_TG3_DSP_TAP26_RMRXSTO |
3159                                       MII_TG3_DSP_TAP26_OPCSINPT;
3160                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3161                         /* Fall through */
3162                 case ASIC_REV_5720:
3163                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3164                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3165                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3166                 }
3167
3168                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3169                 if (!err)
3170                         err = err2;
3171         }
3172
3173 done:
3174         return err;
3175 }
3176
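/* Begin bringing up a copper link.  In low-power mode only 10Mb modes
 * (plus 100Mb when WoL requires it) are advertised; with autoneg
 * enabled the configured advertising mask is programmed and autoneg
 * restarted; otherwise BMCR is written to force the requested
 * speed/duplex, spinning briefly in loopback for the old link to drop.
 */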
3177 static void tg3_phy_copper_begin(struct tg3 *tp)
3178 {
3179         u32 new_adv;
3180         int i;
3181
3182         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3183                 new_adv = ADVERTISED_10baseT_Half |
3184                           ADVERTISED_10baseT_Full;
3185                 if (tg3_flag(tp, WOL_SPEED_100MB))
3186                         new_adv |= ADVERTISED_100baseT_Half |
3187                                    ADVERTISED_100baseT_Full;
3188
3189                 tg3_phy_autoneg_cfg(tp, new_adv,
3190                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3191         } else if (tp->link_config.speed == SPEED_INVALID) {
3192                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3193                         tp->link_config.advertising &=
3194                                 ~(ADVERTISED_1000baseT_Half |
3195                                   ADVERTISED_1000baseT_Full);
3196
3197                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3198                                     tp->link_config.flowctrl);
3199         } else {
3200                 /* Asking for a specific link mode. */
3201                 if (tp->link_config.speed == SPEED_1000) {
3202                         if (tp->link_config.duplex == DUPLEX_FULL)
3203                                 new_adv = ADVERTISED_1000baseT_Full;
3204                         else
3205                                 new_adv = ADVERTISED_1000baseT_Half;
3206                 } else if (tp->link_config.speed == SPEED_100) {
3207                         if (tp->link_config.duplex == DUPLEX_FULL)
3208                                 new_adv = ADVERTISED_100baseT_Full;
3209                         else
3210                                 new_adv = ADVERTISED_100baseT_Half;
3211                 } else {
3212                         if (tp->link_config.duplex == DUPLEX_FULL)
3213                                 new_adv = ADVERTISED_10baseT_Full;
3214                         else
3215                                 new_adv = ADVERTISED_10baseT_Half;
3216                 }
3217
3218                 tg3_phy_autoneg_cfg(tp, new_adv,
3219                                     tp->link_config.flowctrl);
3220         }
3221
3222         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3223             tp->link_config.speed != SPEED_INVALID) {
3224                 u32 bmcr, orig_bmcr;
3225
3226                 tp->link_config.active_speed = tp->link_config.speed;
3227                 tp->link_config.active_duplex = tp->link_config.duplex;
3228
3229                 bmcr = 0;
3230                 switch (tp->link_config.speed) {
3231                 default:
3232                 case SPEED_10:
3233                         break;
3234
3235                 case SPEED_100:
3236                         bmcr |= BMCR_SPEED100;
3237                         break;
3238
3239                 case SPEED_1000:
3240                         bmcr |= BMCR_SPEED1000;
3241                         break;
3242                 }
3243
3244                 if (tp->link_config.duplex == DUPLEX_FULL)
3245                         bmcr |= BMCR_FULLDPLX;
3246
3247                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3248                     (bmcr != orig_bmcr)) {
3249                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3250                         for (i = 0; i < 1500; i++) {
3251                                 u32 tmp;
3252
3253                                 udelay(10);
3254                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3255                                     tg3_readphy(tp, MII_BMSR, &tmp))
3256                                         continue;
3257                                 if (!(tmp & BMSR_LSTATUS)) {
3258                                         udelay(40);
3259                                         break;
3260                                 }
3261                         }
3262                         tg3_writephy(tp, MII_BMCR, bmcr);
3263                         udelay(40);
3264                 }
3265         } else {
3266                 tg3_writephy(tp, MII_BMCR,
3267                              BMCR_ANENABLE | BMCR_ANRESTART);
3268         }
3269 }
3270
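/* Vendor-opaque DSP writes needed to make the BCM5401 PHY behave;
 * only the intent of the first write (see below) is documented.
 */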
3271 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3272 {
3273         int err;
3274
3275         /* Turn off tap power management and set the
3276          * extended packet length bit. */
3277         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3278
3279         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3280         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3281         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3282         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3283         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3284
3285         udelay(40);
3286
3287         return err;
3288 }
3289
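/* Return 1 if the current MII advertisement covers every mode in
 * @mask (including gigabit modes unless the PHY is 10/100-only),
 * 0 otherwise or on a failed PHY read.
 */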
3290 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3291 {
3292         u32 adv_reg, all_mask = 0;
3293
3294         if (mask & ADVERTISED_10baseT_Half)
3295                 all_mask |= ADVERTISE_10HALF;
3296         if (mask & ADVERTISED_10baseT_Full)
3297                 all_mask |= ADVERTISE_10FULL;
3298         if (mask & ADVERTISED_100baseT_Half)
3299                 all_mask |= ADVERTISE_100HALF;
3300         if (mask & ADVERTISED_100baseT_Full)
3301                 all_mask |= ADVERTISE_100FULL;
3302
3303         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3304                 return 0;
3305
3306         if ((adv_reg & all_mask) != all_mask)
3307                 return 0;
3308         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3309                 u32 tg3_ctrl;
3310
3311                 all_mask = 0;
3312                 if (mask & ADVERTISED_1000baseT_Half)
3313                         all_mask |= ADVERTISE_1000HALF;
3314                 if (mask & ADVERTISED_1000baseT_Full)
3315                         all_mask |= ADVERTISE_1000FULL;
3316
3317                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3318                         return 0;
3319
3320                 if ((tg3_ctrl & all_mask) != all_mask)
3321                         return 0;
3322         }
3323         return 1;
3324 }
3325
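/* Check that the advertised pause bits match the requested flow
 * control.  On a full-duplex link a mismatch fails the check (and the
 * partner's abilities are fetched for the caller); on half-duplex the
 * advertisement is quietly rewritten to save a renegotiation later.
 */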
3326 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3327 {
3328         u32 curadv, reqadv;
3329
3330         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3331                 return 1;
3332
3333         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3334         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3335
3336         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3337                 if (curadv != reqadv)
3338                         return 0;
3339
3340                 if (tg3_flag(tp, PAUSE_AUTONEG))
3341                         tg3_readphy(tp, MII_LPA, rmtadv);
3342         } else {
3343                 /* Reprogram the advertisement register, even if it
3344                  * does not affect the current link.  If the link
3345                  * gets renegotiated in the future, we can save an
3346                  * additional renegotiation cycle by advertising
3347                  * it correctly in the first place.
3348                  */
3349                 if (curadv != reqadv) {
3350                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3351                                      ADVERTISE_PAUSE_ASYM);
3352                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3353                 }
3354         }
3355
3356         return 1;
3357 }
3358
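/* (Re)evaluate the link on a copper PHY: apply chip-specific PHY
 * workarounds, poll BMSR for link, derive speed/duplex from the aux
 * status register, validate flow control, then program MAC_MODE and
 * report any carrier change.  Returns 0, or an error from the 5401
 * PHY workaround path.
 */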
3359 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3360 {
3361         int current_link_up;
3362         u32 bmsr, val;
3363         u32 lcl_adv, rmt_adv;
3364         u16 current_speed;
3365         u8 current_duplex;
3366         int i, err;
3367
3368         tw32(MAC_EVENT, 0);
3369
3370         tw32_f(MAC_STATUS,
3371              (MAC_STATUS_SYNC_CHANGED |
3372               MAC_STATUS_CFG_CHANGED |
3373               MAC_STATUS_MI_COMPLETION |
3374               MAC_STATUS_LNKSTATE_CHANGED));
3375         udelay(40);
3376
3377         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3378                 tw32_f(MAC_MI_MODE,
3379                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3380                 udelay(80);
3381         }
3382
3383         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3384
3385         /* Some third-party PHYs need to be reset on link going
3386          * down.
3387          */
3388         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3389              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3390              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3391             netif_carrier_ok(tp->dev)) {
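                /* BMSR latches link-down events; read it twice to get
                 * the current link state.
                 */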
3392                 tg3_readphy(tp, MII_BMSR, &bmsr);
3393                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3394                     !(bmsr & BMSR_LSTATUS))
3395                         force_reset = 1;
3396         }
3397         if (force_reset)
3398                 tg3_phy_reset(tp);
3399
3400         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3401                 tg3_readphy(tp, MII_BMSR, &bmsr);
3402                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3403                     !tg3_flag(tp, INIT_COMPLETE))
3404                         bmsr = 0;
3405
3406                 if (!(bmsr & BMSR_LSTATUS)) {
3407                         err = tg3_init_5401phy_dsp(tp);
3408                         if (err)
3409                                 return err;
3410
3411                         tg3_readphy(tp, MII_BMSR, &bmsr);
3412                         for (i = 0; i < 1000; i++) {
3413                                 udelay(10);
3414                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3415                                     (bmsr & BMSR_LSTATUS)) {
3416                                         udelay(40);
3417                                         break;
3418                                 }
3419                         }
3420
3421                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3422                             TG3_PHY_REV_BCM5401_B0 &&
3423                             !(bmsr & BMSR_LSTATUS) &&
3424                             tp->link_config.active_speed == SPEED_1000) {
3425                                 err = tg3_phy_reset(tp);
3426                                 if (!err)
3427                                         err = tg3_init_5401phy_dsp(tp);
3428                                 if (err)
3429                                         return err;
3430                         }
3431                 }
3432         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3433                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3434                 /* 5701 {A0,B0} CRC bug workaround */
3435                 tg3_writephy(tp, 0x15, 0x0a75);
3436                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3437                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3438                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3439         }
3440
3441         /* Clear pending interrupts... */
3442         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3443         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3444
3445         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3446                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3447         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3448                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3449
3450         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3451             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3452                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3453                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3454                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3455                 else
3456                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3457         }
3458
3459         current_link_up = 0;
3460         current_speed = SPEED_INVALID;
3461         current_duplex = DUPLEX_INVALID;
3462
3463         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3464                 err = tg3_phy_auxctl_read(tp,
3465                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3466                                           &val);
3467                 if (!err && !(val & (1 << 10))) {
3468                         tg3_phy_auxctl_write(tp,
3469                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3470                                              val | (1 << 10));
3471                         goto relink;
3472                 }
3473         }
3474
3475         bmsr = 0;
3476         for (i = 0; i < 100; i++) {
3477                 tg3_readphy(tp, MII_BMSR, &bmsr);
3478                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3479                     (bmsr & BMSR_LSTATUS))
3480                         break;
3481                 udelay(40);
3482         }
3483
3484         if (bmsr & BMSR_LSTATUS) {
3485                 u32 aux_stat, bmcr;
3486
3487                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3488                 for (i = 0; i < 2000; i++) {
3489                         udelay(10);
3490                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3491                             aux_stat)
3492                                 break;
3493                 }
3494
3495                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3496                                              &current_speed,
3497                                              &current_duplex);
3498
3499                 bmcr = 0;
3500                 for (i = 0; i < 200; i++) {
3501                         tg3_readphy(tp, MII_BMCR, &bmcr);
3502                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3503                                 continue;
3504                         if (bmcr && bmcr != 0x7fff)
3505                                 break;
3506                         udelay(10);
3507                 }
3508
3509                 lcl_adv = 0;
3510                 rmt_adv = 0;
3511
3512                 tp->link_config.active_speed = current_speed;
3513                 tp->link_config.active_duplex = current_duplex;
3514
3515                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3516                         if ((bmcr & BMCR_ANENABLE) &&
3517                             tg3_copper_is_advertising_all(tp,
3518                                                 tp->link_config.advertising)) {
3519                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3520                                                                   &rmt_adv))
3521                                         current_link_up = 1;
3522                         }
3523                 } else {
3524                         if (!(bmcr & BMCR_ANENABLE) &&
3525                             tp->link_config.speed == current_speed &&
3526                             tp->link_config.duplex == current_duplex &&
3527                             tp->link_config.flowctrl ==
3528                             tp->link_config.active_flowctrl) {
3529                                 current_link_up = 1;
3530                         }
3531                 }
3532
3533                 if (current_link_up == 1 &&
3534                     tp->link_config.active_duplex == DUPLEX_FULL)
3535                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3536         }
3537
3538 relink:
3539         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3540                 tg3_phy_copper_begin(tp);
3541
3542                 tg3_readphy(tp, MII_BMSR, &bmsr);
3543                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3544                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3545                         current_link_up = 1;
3546         }
3547
3548         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3549         if (current_link_up == 1) {
3550                 if (tp->link_config.active_speed == SPEED_100 ||
3551                     tp->link_config.active_speed == SPEED_10)
3552                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3553                 else
3554                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3555         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3556                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3557         else
3558                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3559
3560         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3561         if (tp->link_config.active_duplex == DUPLEX_HALF)
3562                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3563
3564         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3565                 if (current_link_up == 1 &&
3566                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3567                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3568                 else
3569                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3570         }
3571
3572         /* ??? Without this setting Netgear GA302T PHY does not
3573          * ??? send/receive packets...
3574          */
3575         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3576             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3577                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3578                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3579                 udelay(80);
3580         }
3581
3582         tw32_f(MAC_MODE, tp->mac_mode);
3583         udelay(40);
3584
3585         tg3_phy_eee_adjust(tp, current_link_up);
3586
3587         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3588                 /* Polled via timer. */
3589                 tw32_f(MAC_EVENT, 0);
3590         } else {
3591                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3592         }
3593         udelay(40);
3594
3595         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3596             current_link_up == 1 &&
3597             tp->link_config.active_speed == SPEED_1000 &&
3598             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3599                 udelay(120);
3600                 tw32_f(MAC_STATUS,
3601                      (MAC_STATUS_SYNC_CHANGED |
3602                       MAC_STATUS_CFG_CHANGED));
3603                 udelay(40);
3604                 tg3_write_mem(tp,
3605                               NIC_SRAM_FIRMWARE_MBOX,
3606                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3607         }
3608
3609         /* Prevent send BD corruption: disable PCIe CLKREQ at 10/100 speeds. */
3610         if (tg3_flag(tp, CLKREQ_BUG)) {
3611                 u16 oldlnkctl, newlnkctl;
3612
3613                 pci_read_config_word(tp->pdev,
3614                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3615                                      &oldlnkctl);
3616                 if (tp->link_config.active_speed == SPEED_100 ||
3617                     tp->link_config.active_speed == SPEED_10)
3618                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3619                 else
3620                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3621                 if (newlnkctl != oldlnkctl)
3622                         pci_write_config_word(tp->pdev,
3623                                               pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3624                                               newlnkctl);
3625         }
3626
3627         if (current_link_up != netif_carrier_ok(tp->dev)) {
3628                 if (current_link_up)
3629                         netif_carrier_on(tp->dev);
3630                 else
3631                         netif_carrier_off(tp->dev);
3632                 tg3_link_report(tp);
3633         }
3634
3635         return 0;
3636 }
3637
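/* State for the software 1000BASE-X autonegotiation state machine
 * (IEEE 802.3 clause 37), used when the hardware autoneg engine is
 * not in play.  All negotiation state lives in this structure.
 */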
3638 struct tg3_fiber_aneginfo {
3639         int state;
3640 #define ANEG_STATE_UNKNOWN              0
3641 #define ANEG_STATE_AN_ENABLE            1
3642 #define ANEG_STATE_RESTART_INIT         2
3643 #define ANEG_STATE_RESTART              3
3644 #define ANEG_STATE_DISABLE_LINK_OK      4
3645 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3646 #define ANEG_STATE_ABILITY_DETECT       6
3647 #define ANEG_STATE_ACK_DETECT_INIT      7
3648 #define ANEG_STATE_ACK_DETECT           8
3649 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3650 #define ANEG_STATE_COMPLETE_ACK         10
3651 #define ANEG_STATE_IDLE_DETECT_INIT     11
3652 #define ANEG_STATE_IDLE_DETECT          12
3653 #define ANEG_STATE_LINK_OK              13
3654 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3655 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3656
3657         u32 flags;
3658 #define MR_AN_ENABLE            0x00000001
3659 #define MR_RESTART_AN           0x00000002
3660 #define MR_AN_COMPLETE          0x00000004
3661 #define MR_PAGE_RX              0x00000008
3662 #define MR_NP_LOADED            0x00000010
3663 #define MR_TOGGLE_TX            0x00000020
3664 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3665 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3666 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3667 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3668 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3669 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3670 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3671 #define MR_TOGGLE_RX            0x00002000
3672 #define MR_NP_RX                0x00004000
3673
3674 #define MR_LINK_OK              0x80000000
3675
3676         unsigned long link_time, cur_time;
3677
3678         u32 ability_match_cfg;
3679         int ability_match_count;
3680
3681         char ability_match, idle_match, ack_match;
3682
3683         u32 txconfig, rxconfig;
3684 #define ANEG_CFG_NP             0x00000080
3685 #define ANEG_CFG_ACK            0x00000040
3686 #define ANEG_CFG_RF2            0x00000020
3687 #define ANEG_CFG_RF1            0x00000010
3688 #define ANEG_CFG_PS2            0x00000001
3689 #define ANEG_CFG_PS1            0x00008000
3690 #define ANEG_CFG_HD             0x00004000
3691 #define ANEG_CFG_FD             0x00002000
3692 #define ANEG_CFG_INVAL          0x00001f06
3693
3694 };
3695 #define ANEG_OK         0
3696 #define ANEG_DONE       1
3697 #define ANEG_TIMER_ENAB 2
3698 #define ANEG_FAILED     -1
3699
3700 #define ANEG_STATE_SETTLE_TIME  10000
3701
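/* Advance the fiber autoneg state machine by one tick: sample the
 * received config word from MAC_RX_AUTO_NEG, update ability/ack/idle
 * matching, then run one state transition.  Returns ANEG_OK,
 * ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED; the caller supplies the
 * tick loop (see fiber_autoneg() below).
 */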
3702 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3703                                    struct tg3_fiber_aneginfo *ap)
3704 {
3705         u16 flowctrl;
3706         unsigned long delta;
3707         u32 rx_cfg_reg;
3708         int ret;
3709
3710         if (ap->state == ANEG_STATE_UNKNOWN) {
3711                 ap->rxconfig = 0;
3712                 ap->link_time = 0;
3713                 ap->cur_time = 0;
3714                 ap->ability_match_cfg = 0;
3715                 ap->ability_match_count = 0;
3716                 ap->ability_match = 0;
3717                 ap->idle_match = 0;
3718                 ap->ack_match = 0;
3719         }
3720         ap->cur_time++;
3721
3722         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3723                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3724
3725                 if (rx_cfg_reg != ap->ability_match_cfg) {
3726                         ap->ability_match_cfg = rx_cfg_reg;
3727                         ap->ability_match = 0;
3728                         ap->ability_match_count = 0;
3729                 } else {
3730                         if (++ap->ability_match_count > 1) {
3731                                 ap->ability_match = 1;
3732                                 ap->ability_match_cfg = rx_cfg_reg;
3733                         }
3734                 }
3735                 if (rx_cfg_reg & ANEG_CFG_ACK)
3736                         ap->ack_match = 1;
3737                 else
3738                         ap->ack_match = 0;
3739
3740                 ap->idle_match = 0;
3741         } else {
3742                 ap->idle_match = 1;
3743                 ap->ability_match_cfg = 0;
3744                 ap->ability_match_count = 0;
3745                 ap->ability_match = 0;
3746                 ap->ack_match = 0;
3747
3748                 rx_cfg_reg = 0;
3749         }
3750
3751         ap->rxconfig = rx_cfg_reg;
3752         ret = ANEG_OK;
3753
3754         switch (ap->state) {
3755         case ANEG_STATE_UNKNOWN:
3756                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3757                         ap->state = ANEG_STATE_AN_ENABLE;
3758
3759                 /* fallthru */
3760         case ANEG_STATE_AN_ENABLE:
3761                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3762                 if (ap->flags & MR_AN_ENABLE) {
3763                         ap->link_time = 0;
3764                         ap->cur_time = 0;
3765                         ap->ability_match_cfg = 0;
3766                         ap->ability_match_count = 0;
3767                         ap->ability_match = 0;
3768                         ap->idle_match = 0;
3769                         ap->ack_match = 0;
3770
3771                         ap->state = ANEG_STATE_RESTART_INIT;
3772                 } else {
3773                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3774                 }
3775                 break;
3776
3777         case ANEG_STATE_RESTART_INIT:
3778                 ap->link_time = ap->cur_time;
3779                 ap->flags &= ~(MR_NP_LOADED);
3780                 ap->txconfig = 0;
3781                 tw32(MAC_TX_AUTO_NEG, 0);
3782                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3783                 tw32_f(MAC_MODE, tp->mac_mode);
3784                 udelay(40);
3785
3786                 ret = ANEG_TIMER_ENAB;
3787                 ap->state = ANEG_STATE_RESTART;
3788
3789                 /* fallthru */
3790         case ANEG_STATE_RESTART:
3791                 delta = ap->cur_time - ap->link_time;
3792                 if (delta > ANEG_STATE_SETTLE_TIME)
3793                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3794                 else
3795                         ret = ANEG_TIMER_ENAB;
3796                 break;
3797
3798         case ANEG_STATE_DISABLE_LINK_OK:
3799                 ret = ANEG_DONE;
3800                 break;
3801
3802         case ANEG_STATE_ABILITY_DETECT_INIT:
3803                 ap->flags &= ~(MR_TOGGLE_TX);
3804                 ap->txconfig = ANEG_CFG_FD;
3805                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3806                 if (flowctrl & ADVERTISE_1000XPAUSE)
3807                         ap->txconfig |= ANEG_CFG_PS1;
3808                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3809                         ap->txconfig |= ANEG_CFG_PS2;
3810                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3811                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3812                 tw32_f(MAC_MODE, tp->mac_mode);
3813                 udelay(40);
3814
3815                 ap->state = ANEG_STATE_ABILITY_DETECT;
3816                 break;
3817
3818         case ANEG_STATE_ABILITY_DETECT:
3819                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3820                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3821                 break;
3822
3823         case ANEG_STATE_ACK_DETECT_INIT:
3824                 ap->txconfig |= ANEG_CFG_ACK;
3825                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3826                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3827                 tw32_f(MAC_MODE, tp->mac_mode);
3828                 udelay(40);
3829
3830                 ap->state = ANEG_STATE_ACK_DETECT;
3831
3832                 /* fallthru */
3833         case ANEG_STATE_ACK_DETECT:
3834                 if (ap->ack_match != 0) {
3835                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3836                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3837                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3838                         } else {
3839                                 ap->state = ANEG_STATE_AN_ENABLE;
3840                         }
3841                 } else if (ap->ability_match != 0 &&
3842                            ap->rxconfig == 0) {
3843                         ap->state = ANEG_STATE_AN_ENABLE;
3844                 }
3845                 break;
3846
3847         case ANEG_STATE_COMPLETE_ACK_INIT:
3848                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3849                         ret = ANEG_FAILED;
3850                         break;
3851                 }
3852                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3853                                MR_LP_ADV_HALF_DUPLEX |
3854                                MR_LP_ADV_SYM_PAUSE |
3855                                MR_LP_ADV_ASYM_PAUSE |
3856                                MR_LP_ADV_REMOTE_FAULT1 |
3857                                MR_LP_ADV_REMOTE_FAULT2 |
3858                                MR_LP_ADV_NEXT_PAGE |
3859                                MR_TOGGLE_RX |
3860                                MR_NP_RX);
3861                 if (ap->rxconfig & ANEG_CFG_FD)
3862                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3863                 if (ap->rxconfig & ANEG_CFG_HD)
3864                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3865                 if (ap->rxconfig & ANEG_CFG_PS1)
3866                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3867                 if (ap->rxconfig & ANEG_CFG_PS2)
3868                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3869                 if (ap->rxconfig & ANEG_CFG_RF1)
3870                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3871                 if (ap->rxconfig & ANEG_CFG_RF2)
3872                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3873                 if (ap->rxconfig & ANEG_CFG_NP)
3874                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3875
3876                 ap->link_time = ap->cur_time;
3877
3878                 ap->flags ^= (MR_TOGGLE_TX);
3879                 if (ap->rxconfig & 0x0008)      /* received toggle bit */
3880                         ap->flags |= MR_TOGGLE_RX;
3881                 if (ap->rxconfig & ANEG_CFG_NP)
3882                         ap->flags |= MR_NP_RX;
3883                 ap->flags |= MR_PAGE_RX;
3884
3885                 ap->state = ANEG_STATE_COMPLETE_ACK;
3886                 ret = ANEG_TIMER_ENAB;
3887                 break;
3888
3889         case ANEG_STATE_COMPLETE_ACK:
3890                 if (ap->ability_match != 0 &&
3891                     ap->rxconfig == 0) {
3892                         ap->state = ANEG_STATE_AN_ENABLE;
3893                         break;
3894                 }
3895                 delta = ap->cur_time - ap->link_time;
3896                 if (delta > ANEG_STATE_SETTLE_TIME) {
3897                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3898                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3899                         } else {
3900                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3901                                     !(ap->flags & MR_NP_RX)) {
3902                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3903                                 } else {
3904                                         ret = ANEG_FAILED;
3905                                 }
3906                         }
3907                 }
3908                 break;
3909
3910         case ANEG_STATE_IDLE_DETECT_INIT:
3911                 ap->link_time = ap->cur_time;
3912                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3913                 tw32_f(MAC_MODE, tp->mac_mode);
3914                 udelay(40);
3915
3916                 ap->state = ANEG_STATE_IDLE_DETECT;
3917                 ret = ANEG_TIMER_ENAB;
3918                 break;
3919
3920         case ANEG_STATE_IDLE_DETECT:
3921                 if (ap->ability_match != 0 &&
3922                     ap->rxconfig == 0) {
3923                         ap->state = ANEG_STATE_AN_ENABLE;
3924                         break;
3925                 }
3926                 delta = ap->cur_time - ap->link_time;
3927                 if (delta > ANEG_STATE_SETTLE_TIME) {
3928                         /* XXX another gem from the Broadcom driver :( */
3929                         ap->state = ANEG_STATE_LINK_OK;
3930                 }
3931                 break;
3932
3933         case ANEG_STATE_LINK_OK:
3934                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3935                 ret = ANEG_DONE;
3936                 break;
3937
3938         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3939                 /* ??? unimplemented */
3940                 break;
3941
3942         case ANEG_STATE_NEXT_PAGE_WAIT:
3943                 /* ??? unimplemented */
3944                 break;
3945
3946         default:
3947                 ret = ANEG_FAILED;
3948                 break;
3949         }
3950
3951         return ret;
3952 }
3953
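/* Drive the state machine above to completion, bounded by a ~195 ms
 * busy-wait.  Reports the negotiated tx config word and rx flags, and
 * returns 1 only when negotiation finished with completion, link-ok
 * or a full-duplex-capable partner flagged.
 */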
3954 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3955 {
3956         int res = 0;
3957         struct tg3_fiber_aneginfo aninfo;
3958         int status = ANEG_FAILED;
3959         unsigned int tick;
3960         u32 tmp;
3961
3962         tw32_f(MAC_TX_AUTO_NEG, 0);
3963
3964         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3965         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3966         udelay(40);
3967
3968         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3969         udelay(40);
3970
3971         memset(&aninfo, 0, sizeof(aninfo));
3972         aninfo.flags |= MR_AN_ENABLE;
3973         aninfo.state = ANEG_STATE_UNKNOWN;
3974         aninfo.cur_time = 0;
3975         tick = 0;
3976         while (++tick < 195000) {
3977                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3978                 if (status == ANEG_DONE || status == ANEG_FAILED)
3979                         break;
3980
3981                 udelay(1);
3982         }
3983
3984         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3985         tw32_f(MAC_MODE, tp->mac_mode);
3986         udelay(40);
3987
3988         *txflags = aninfo.txconfig;
3989         *rxflags = aninfo.flags;
3990
3991         if (status == ANEG_DONE &&
3992             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3993                              MR_LP_ADV_FULL_DUPLEX)))
3994                 res = 1;
3995
3996         return res;
3997 }
3998
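/* One-time init of the external BCM8002 SerDes PHY: PLL lock range,
 * soft reset, then a sequence of vendor-opaque register writes.
 * Skipped when init is already complete but PCS sync has been lost.
 */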
3999 static void tg3_init_bcm8002(struct tg3 *tp)
4000 {
4001         u32 mac_status = tr32(MAC_STATUS);
4002         int i;
4003
4004         /* Reset on first-time init or when we have a link. */
4005         if (tg3_flag(tp, INIT_COMPLETE) &&
4006             !(mac_status & MAC_STATUS_PCS_SYNCED))
4007                 return;
4008
4009         /* Set PLL lock range. */
4010         tg3_writephy(tp, 0x16, 0x8007);
4011
4012         /* SW reset */
4013         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4014
4015         /* Wait for reset to complete. */
4016         /* XXX schedule_timeout() ... */
4017         for (i = 0; i < 500; i++)
4018                 udelay(10);
4019
4020         /* Config mode; select PMA/Ch 1 regs. */
4021         tg3_writephy(tp, 0x10, 0x8411);
4022
4023         /* Enable auto-lock and comdet, select txclk for tx. */
4024         tg3_writephy(tp, 0x11, 0x0a10);
4025
4026         tg3_writephy(tp, 0x18, 0x00a0);
4027         tg3_writephy(tp, 0x16, 0x41ff);
4028
4029         /* Assert and deassert POR. */
4030         tg3_writephy(tp, 0x13, 0x0400);
4031         udelay(40);
4032         tg3_writephy(tp, 0x13, 0x0000);
4033
4034         tg3_writephy(tp, 0x11, 0x0a50);
4035         udelay(40);
4036         tg3_writephy(tp, 0x11, 0x0a10);
4037
4038         /* Wait for signal to stabilize */
4039         /* XXX schedule_timeout() ... */
4040         for (i = 0; i < 15000; i++)
4041                 udelay(10);
4042
4043         /* Deselect the channel register so we can read the PHYID
4044          * later.
4045          */
4046         tg3_writephy(tp, 0x10, 0x8011);
4047 }
4048
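/* Fiber link setup via the SG-DIG hardware autoneg engine.  Applies a
 * SERDES config workaround on everything except 5704 A0/A1, handles
 * forced (non-autoneg) links, and falls back to parallel detection
 * when the partner never completes autoneg.  Returns nonzero when the
 * link should be considered up.
 */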
4049 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4050 {
4051         u16 flowctrl;
4052         u32 sg_dig_ctrl, sg_dig_status;
4053         u32 serdes_cfg, expected_sg_dig_ctrl;
4054         int workaround, port_a;
4055         int current_link_up;
4056
4057         serdes_cfg = 0;
4058         expected_sg_dig_ctrl = 0;
4059         workaround = 0;
4060         port_a = 1;
4061         current_link_up = 0;
4062
4063         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4064             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4065                 workaround = 1;
4066                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4067                         port_a = 0;
4068
4069                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4070                 /* preserve bits 20-23 for voltage regulator */
4071                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4072         }
4073
4074         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4075
4076         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4077                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4078                         if (workaround) {
4079                                 u32 val = serdes_cfg;
4080
4081                                 if (port_a)
4082                                         val |= 0xc010000;
4083                                 else
4084                                         val |= 0x4010000;
4085                                 tw32_f(MAC_SERDES_CFG, val);
4086                         }
4087
4088                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4089                 }
4090                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4091                         tg3_setup_flow_control(tp, 0, 0);
4092                         current_link_up = 1;
4093                 }
4094                 goto out;
4095         }
4096
4097         /* Want auto-negotiation.  */
4098         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4099
4100         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4101         if (flowctrl & ADVERTISE_1000XPAUSE)
4102                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4103         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4104                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4105
4106         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4107                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4108                     tp->serdes_counter &&
4109                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4110                                     MAC_STATUS_RCVD_CFG)) ==
4111                      MAC_STATUS_PCS_SYNCED)) {
4112                         tp->serdes_counter--;
4113                         current_link_up = 1;
4114                         goto out;
4115                 }
4116 restart_autoneg:
4117                 if (workaround)
4118                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4119                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4120                 udelay(5);
4121                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4122
4123                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4124                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4125         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4126                                  MAC_STATUS_SIGNAL_DET)) {
4127                 sg_dig_status = tr32(SG_DIG_STATUS);
4128                 mac_status = tr32(MAC_STATUS);
4129
4130                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4131                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4132                         u32 local_adv = 0, remote_adv = 0;
4133
4134                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4135                                 local_adv |= ADVERTISE_1000XPAUSE;
4136                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4137                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4138
4139                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4140                                 remote_adv |= LPA_1000XPAUSE;
4141                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4142                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4143
4144                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4145                         current_link_up = 1;
4146                         tp->serdes_counter = 0;
4147                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4148                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4149                         if (tp->serdes_counter)
4150                                 tp->serdes_counter--;
4151                         else {
4152                                 if (workaround) {
4153                                         u32 val = serdes_cfg;
4154
4155                                         if (port_a)
4156                                                 val |= 0xc010000;
4157                                         else
4158                                                 val |= 0x4010000;
4159
4160                                         tw32_f(MAC_SERDES_CFG, val);
4161                                 }
4162
4163                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4164                                 udelay(40);
4165
4166                                 /* Link parallel detection - the link is up
4167                                  * only if we have PCS_SYNC and are not
4168                                  * receiving config code words. */
4169                                 mac_status = tr32(MAC_STATUS);
4170                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4171                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4172                                         tg3_setup_flow_control(tp, 0, 0);
4173                                         current_link_up = 1;
4174                                         tp->phy_flags |=
4175                                                 TG3_PHYFLG_PARALLEL_DETECT;
4176                                         tp->serdes_counter =
4177                                                 SERDES_PARALLEL_DET_TIMEOUT;
4178                                 } else
4179                                         goto restart_autoneg;
4180                         }
4181                 }
4182         } else {
4183                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4184                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4185         }
4186
4187 out:
4188         return current_link_up;
4189 }
4190
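/* Fiber link setup without the hardware engine: run the software
 * autoneg state machine when autoneg is enabled, otherwise simply
 * force a 1000FD link.  Returns nonzero when the link is up.
 */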
4191 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4192 {
4193         int current_link_up = 0;
4194
4195         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4196                 goto out;
4197
4198         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4199                 u32 txflags, rxflags;
4200                 int i;
4201
4202                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4203                         u32 local_adv = 0, remote_adv = 0;
4204
4205                         if (txflags & ANEG_CFG_PS1)
4206                                 local_adv |= ADVERTISE_1000XPAUSE;
4207                         if (txflags & ANEG_CFG_PS2)
4208                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4209
4210                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4211                                 remote_adv |= LPA_1000XPAUSE;
4212                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4213                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4214
4215                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4216
4217                         current_link_up = 1;
4218                 }
4219                 for (i = 0; i < 30; i++) {
4220                         udelay(20);
4221                         tw32_f(MAC_STATUS,
4222                                (MAC_STATUS_SYNC_CHANGED |
4223                                 MAC_STATUS_CFG_CHANGED));
4224                         udelay(40);
4225                         if ((tr32(MAC_STATUS) &
4226                              (MAC_STATUS_SYNC_CHANGED |
4227                               MAC_STATUS_CFG_CHANGED)) == 0)
4228                                 break;
4229                 }
4230
4231                 mac_status = tr32(MAC_STATUS);
4232                 if (current_link_up == 0 &&
4233                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4234                     !(mac_status & MAC_STATUS_RCVD_CFG))
4235                         current_link_up = 1;
4236         } else {
4237                 tg3_setup_flow_control(tp, 0, 0);
4238
4239                 /* Forcing 1000FD link up. */
4240                 current_link_up = 1;
4241
4242                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4243                 udelay(40);
4244
4245                 tw32_f(MAC_MODE, tp->mac_mode);
4246                 udelay(40);
4247         }
4248
4249 out:
4250         return current_link_up;
4251 }
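
/* Illustrative sketch (not used by the driver): how the transmitted
 * autoneg config word maps onto the 1000BASE-X pause advertisement
 * bits, mirroring the txflags handling above.
 */
static inline u32 tg3_example_local_pause_adv(u32 txflags)
{
        u32 adv = 0;

        if (txflags & ANEG_CFG_PS1)             /* symmetric pause */
                adv |= ADVERTISE_1000XPAUSE;
        if (txflags & ANEG_CFG_PS2)             /* asymmetric pause */
                adv |= ADVERTISE_1000XPSE_ASYM;

        return adv;
}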
4252
4253 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4254 {
4255         u32 orig_pause_cfg;
4256         u16 orig_active_speed;
4257         u8 orig_active_duplex;
4258         u32 mac_status;
4259         int current_link_up;
4260         int i;
4261
4262         orig_pause_cfg = tp->link_config.active_flowctrl;
4263         orig_active_speed = tp->link_config.active_speed;
4264         orig_active_duplex = tp->link_config.active_duplex;
4265
4266         if (!tg3_flag(tp, HW_AUTONEG) &&
4267             netif_carrier_ok(tp->dev) &&
4268             tg3_flag(tp, INIT_COMPLETE)) {
4269                 mac_status = tr32(MAC_STATUS);
4270                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4271                                MAC_STATUS_SIGNAL_DET |
4272                                MAC_STATUS_CFG_CHANGED |
4273                                MAC_STATUS_RCVD_CFG);
4274                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4275                                    MAC_STATUS_SIGNAL_DET)) {
4276                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4277                                             MAC_STATUS_CFG_CHANGED));
4278                         return 0;
4279                 }
4280         }
4281
4282         tw32_f(MAC_TX_AUTO_NEG, 0);
4283
4284         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4285         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4286         tw32_f(MAC_MODE, tp->mac_mode);
4287         udelay(40);
4288
4289         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4290                 tg3_init_bcm8002(tp);
4291
4292         /* Enable link change events even when polling the serdes.  */
4293         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4294         udelay(40);
4295
4296         current_link_up = 0;
4297         mac_status = tr32(MAC_STATUS);
4298
4299         if (tg3_flag(tp, HW_AUTONEG))
4300                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4301         else
4302                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4303
4304         tp->napi[0].hw_status->status =
4305                 (SD_STATUS_UPDATED |
4306                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4307
4308         for (i = 0; i < 100; i++) {
4309                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4310                                     MAC_STATUS_CFG_CHANGED));
4311                 udelay(5);
4312                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4313                                          MAC_STATUS_CFG_CHANGED |
4314                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4315                         break;
4316         }
4317
4318         mac_status = tr32(MAC_STATUS);
4319         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4320                 current_link_up = 0;
4321                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4322                     tp->serdes_counter == 0) {
4323                         tw32_f(MAC_MODE, (tp->mac_mode |
4324                                           MAC_MODE_SEND_CONFIGS));
4325                         udelay(1);
4326                         tw32_f(MAC_MODE, tp->mac_mode);
4327                 }
4328         }
4329
4330         if (current_link_up == 1) {
4331                 tp->link_config.active_speed = SPEED_1000;
4332                 tp->link_config.active_duplex = DUPLEX_FULL;
4333                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4334                                     LED_CTRL_LNKLED_OVERRIDE |
4335                                     LED_CTRL_1000MBPS_ON));
4336         } else {
4337                 tp->link_config.active_speed = SPEED_INVALID;
4338                 tp->link_config.active_duplex = DUPLEX_INVALID;
4339                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4340                                     LED_CTRL_LNKLED_OVERRIDE |
4341                                     LED_CTRL_TRAFFIC_OVERRIDE));
4342         }
4343
4344         if (current_link_up != netif_carrier_ok(tp->dev)) {
4345                 if (current_link_up)
4346                         netif_carrier_on(tp->dev);
4347                 else
4348                         netif_carrier_off(tp->dev);
4349                 tg3_link_report(tp);
4350         } else {
4351                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4352                 if (orig_pause_cfg != now_pause_cfg ||
4353                     orig_active_speed != tp->link_config.active_speed ||
4354                     orig_active_duplex != tp->link_config.active_duplex)
4355                         tg3_link_report(tp);
4356         }
4357
4358         return 0;
4359 }
4360
4361 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4362 {
4363         int current_link_up, err = 0;
4364         u32 bmsr, bmcr;
4365         u16 current_speed;
4366         u8 current_duplex;
4367         u32 local_adv, remote_adv;
4368
4369         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4370         tw32_f(MAC_MODE, tp->mac_mode);
4371         udelay(40);
4372
4373         tw32(MAC_EVENT, 0);
4374
4375         tw32_f(MAC_STATUS,
4376              (MAC_STATUS_SYNC_CHANGED |
4377               MAC_STATUS_CFG_CHANGED |
4378               MAC_STATUS_MI_COMPLETION |
4379               MAC_STATUS_LNKSTATE_CHANGED));
4380         udelay(40);
4381
4382         if (force_reset)
4383                 tg3_phy_reset(tp);
4384
4385         current_link_up = 0;
4386         current_speed = SPEED_INVALID;
4387         current_duplex = DUPLEX_INVALID;
4388
4389         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4390         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4392                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4393                         bmsr |= BMSR_LSTATUS;
4394                 else
4395                         bmsr &= ~BMSR_LSTATUS;
4396         }
4397
4398         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4399
4400         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4401             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4402                 /* do nothing, just check for link up at the end */
4403         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4404                 u32 adv, new_adv;
4405
4406                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4407                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4408                                   ADVERTISE_1000XPAUSE |
4409                                   ADVERTISE_1000XPSE_ASYM |
4410                                   ADVERTISE_SLCT);
4411
4412                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4413
4414                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4415                         new_adv |= ADVERTISE_1000XHALF;
4416                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4417                         new_adv |= ADVERTISE_1000XFULL;
4418
4419                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4420                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4421                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4422                         tg3_writephy(tp, MII_BMCR, bmcr);
4423
4424                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4425                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4426                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4427
4428                         return err;
4429                 }
4430         } else {
4431                 u32 new_bmcr;
4432
4433                 bmcr &= ~BMCR_SPEED1000;
4434                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4435
4436                 if (tp->link_config.duplex == DUPLEX_FULL)
4437                         new_bmcr |= BMCR_FULLDPLX;
4438
4439                 if (new_bmcr != bmcr) {
4440                         /* BMCR_SPEED1000 is a reserved bit that needs
4441                          * to be set on write.
4442                          */
4443                         new_bmcr |= BMCR_SPEED1000;
4444
4445                         /* Force a linkdown */
4446                         if (netif_carrier_ok(tp->dev)) {
4447                                 u32 adv;
4448
4449                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4450                                 adv &= ~(ADVERTISE_1000XFULL |
4451                                          ADVERTISE_1000XHALF |
4452                                          ADVERTISE_SLCT);
4453                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4454                                 tg3_writephy(tp, MII_BMCR, bmcr |
4455                                                            BMCR_ANRESTART |
4456                                                            BMCR_ANENABLE);
4457                                 udelay(10);
4458                                 netif_carrier_off(tp->dev);
4459                         }
4460                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4461                         bmcr = new_bmcr;
4462                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4463                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4464                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4465                             ASIC_REV_5714) {
4466                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4467                                         bmsr |= BMSR_LSTATUS;
4468                                 else
4469                                         bmsr &= ~BMSR_LSTATUS;
4470                         }
4471                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4472                 }
4473         }
4474
4475         if (bmsr & BMSR_LSTATUS) {
4476                 current_speed = SPEED_1000;
4477                 current_link_up = 1;
4478                 if (bmcr & BMCR_FULLDPLX)
4479                         current_duplex = DUPLEX_FULL;
4480                 else
4481                         current_duplex = DUPLEX_HALF;
4482
4483                 local_adv = 0;
4484                 remote_adv = 0;
4485
4486                 if (bmcr & BMCR_ANENABLE) {
4487                         u32 common;
4488
4489                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4490                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4491                         common = local_adv & remote_adv;
4492                         if (common & (ADVERTISE_1000XHALF |
4493                                       ADVERTISE_1000XFULL)) {
4494                                 if (common & ADVERTISE_1000XFULL)
4495                                         current_duplex = DUPLEX_FULL;
4496                                 else
4497                                         current_duplex = DUPLEX_HALF;
4498                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4499                                 /* Link is up via parallel detect */
4500                         } else {
4501                                 current_link_up = 0;
4502                         }
4503                 }
4504         }
4505
4506         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4507                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4508
4509         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4510         if (tp->link_config.active_duplex == DUPLEX_HALF)
4511                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4512
4513         tw32_f(MAC_MODE, tp->mac_mode);
4514         udelay(40);
4515
4516         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4517
4518         tp->link_config.active_speed = current_speed;
4519         tp->link_config.active_duplex = current_duplex;
4520
4521         if (current_link_up != netif_carrier_ok(tp->dev)) {
4522                 if (current_link_up)
4523                         netif_carrier_on(tp->dev);
4524                 else {
4525                         netif_carrier_off(tp->dev);
4526                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4527                 }
4528                 tg3_link_report(tp);
4529         }
4530         return err;
4531 }
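
/* Illustrative sketch (not used by the driver): duplex resolution from
 * the intersection of local and link-partner 1000BASE-X advertisements,
 * mirroring the 'common' logic in tg3_setup_fiber_mii_phy() above.
 */
static inline u8 tg3_example_resolve_duplex(u32 local_adv, u32 remote_adv)
{
        u32 common = local_adv & remote_adv;

        if (common & ADVERTISE_1000XFULL)
                return DUPLEX_FULL;
        if (common & ADVERTISE_1000XHALF)
                return DUPLEX_HALF;

        return DUPLEX_INVALID;  /* no common mode negotiated */
}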
4532
4533 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4534 {
4535         if (tp->serdes_counter) {
4536                 /* Give autoneg time to complete. */
4537                 tp->serdes_counter--;
4538                 return;
4539         }
4540
4541         if (!netif_carrier_ok(tp->dev) &&
4542             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4543                 u32 bmcr;
4544
4545                 tg3_readphy(tp, MII_BMCR, &bmcr);
4546                 if (bmcr & BMCR_ANENABLE) {
4547                         u32 phy1, phy2;
4548
4549                         /* Select shadow register 0x1f */
4550                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4551                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4552
4553                         /* Select expansion interrupt status register */
4554                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4555                                          MII_TG3_DSP_EXP1_INT_STAT);
4556                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4557                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4558
4559                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4560                                 /* We have signal detect and not receiving
4561                                  * config code words, link is up by parallel
4562                                  * detection.
4563                                  */
4564
4565                                 bmcr &= ~BMCR_ANENABLE;
4566                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4567                                 tg3_writephy(tp, MII_BMCR, bmcr);
4568                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4569                         }
4570                 }
4571         } else if (netif_carrier_ok(tp->dev) &&
4572                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4573                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4574                 u32 phy2;
4575
4576                 /* Select expansion interrupt status register */
4577                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4578                                  MII_TG3_DSP_EXP1_INT_STAT);
4579                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4580                 if (phy2 & 0x20) {
4581                         u32 bmcr;
4582
4583                         /* Config code words received, turn on autoneg. */
4584                         tg3_readphy(tp, MII_BMCR, &bmcr);
4585                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4586
4587                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4588
4589                 }
4590         }
4591 }
4592
4593 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4594 {
4595         u32 val;
4596         int err;
4597
4598         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4599                 err = tg3_setup_fiber_phy(tp, force_reset);
4600         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4601                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4602         else
4603                 err = tg3_setup_copper_phy(tp, force_reset);
4604
4605         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4606                 u32 scale;
4607
4608                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4609                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4610                         scale = 65;
4611                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4612                         scale = 6;
4613                 else
4614                         scale = 12;
4615
4616                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4617                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4618                 tw32(GRC_MISC_CFG, val);
4619         }
4620
4621         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4622               (6 << TX_LENGTHS_IPG_SHIFT);
4623         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4624                 val |= tr32(MAC_TX_LENGTHS) &
4625                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4626                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4627
4628         if (tp->link_config.active_speed == SPEED_1000 &&
4629             tp->link_config.active_duplex == DUPLEX_HALF)
4630                 tw32(MAC_TX_LENGTHS, val |
4631                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4632         else
4633                 tw32(MAC_TX_LENGTHS, val |
4634                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4635
4636         if (!tg3_flag(tp, 5705_PLUS)) {
4637                 if (netif_carrier_ok(tp->dev)) {
4638                         tw32(HOSTCC_STAT_COAL_TICKS,
4639                              tp->coal.stats_block_coalesce_usecs);
4640                 } else {
4641                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4642                 }
4643         }
4644
4645         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4646                 val = tr32(PCIE_PWR_MGMT_THRESH);
4647                 if (!netif_carrier_ok(tp->dev))
4648                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4649                               tp->pwrmgmt_thresh;
4650                 else
4651                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4652                 tw32(PCIE_PWR_MGMT_THRESH, val);
4653         }
4654
4655         return err;
4656 }
4657
4658 static inline int tg3_irq_sync(struct tg3 *tp)
4659 {
4660         return tp->irq_sync;
4661 }
4662
4663 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4664 {
4665         int i;
4666
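        /* Bias the destination pointer by 'off' so that each value is
         * stored at the same offset within the dump buffer as the
         * register has within the device's register space.
         */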
4667         dst = (u32 *)((u8 *)dst + off);
4668         for (i = 0; i < len; i += sizeof(u32))
4669                 *dst++ = tr32(off + i);
4670 }
4671
4672 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4673 {
4674         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4675         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4676         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4677         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4678         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4679         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4680         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4681         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4682         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4683         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4684         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4685         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4686         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4687         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4688         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4689         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4690         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4691         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4692         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4693
4694         if (tg3_flag(tp, SUPPORT_MSIX))
4695                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4696
4697         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4698         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4699         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4700         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4701         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4702         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4703         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4704         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4705
4706         if (!tg3_flag(tp, 5705_PLUS)) {
4707                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4708                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4709                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4710         }
4711
4712         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4713         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4714         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4715         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4716         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4717
4718         if (tg3_flag(tp, NVRAM))
4719                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4720 }
4721
4722 static void tg3_dump_state(struct tg3 *tp)
4723 {
4724         int i;
4725         u32 *regs;
4726
4727         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4728         if (!regs) {
4729                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4730                 return;
4731         }
4732
4733         if (tg3_flag(tp, PCI_EXPRESS)) {
4734                 /* Read up to but not including private PCI registers */
4735                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4736                         regs[i / sizeof(u32)] = tr32(i);
4737         } else
4738                 tg3_dump_legacy_regs(tp, regs);
4739
4740         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4741                 if (!regs[i + 0] && !regs[i + 1] &&
4742                     !regs[i + 2] && !regs[i + 3])
4743                         continue;
4744
4745                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4746                            i * 4,
4747                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4748         }
4749
4750         kfree(regs);
4751
4752         for (i = 0; i < tp->irq_cnt; i++) {
4753                 struct tg3_napi *tnapi = &tp->napi[i];
4754
4755                 /* SW status block */
4756                 netdev_err(tp->dev,
4757                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4758                            i,
4759                            tnapi->hw_status->status,
4760                            tnapi->hw_status->status_tag,
4761                            tnapi->hw_status->rx_jumbo_consumer,
4762                            tnapi->hw_status->rx_consumer,
4763                            tnapi->hw_status->rx_mini_consumer,
4764                            tnapi->hw_status->idx[0].rx_producer,
4765                            tnapi->hw_status->idx[0].tx_consumer);
4766
4767                 netdev_err(tp->dev,
4768                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4769                            i,
4770                            tnapi->last_tag, tnapi->last_irq_tag,
4771                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4772                            tnapi->rx_rcb_ptr,
4773                            tnapi->prodring.rx_std_prod_idx,
4774                            tnapi->prodring.rx_std_cons_idx,
4775                            tnapi->prodring.rx_jmb_prod_idx,
4776                            tnapi->prodring.rx_jmb_cons_idx);
4777         }
4778 }
4779
4780 /* This is called whenever we suspect that the system chipset is re-
4781  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4782  * is bogus tx completions. We try to recover by setting the
4783  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4784  * in the workqueue.
4785  */
4786 static void tg3_tx_recover(struct tg3 *tp)
4787 {
4788         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4789                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4790
4791         netdev_warn(tp->dev,
4792                     "The system may be re-ordering memory-mapped I/O "
4793                     "cycles to the network device, attempting to recover. "
4794                     "Please report the problem to the driver maintainer "
4795                     "and include system chipset information.\n");
4796
4797         spin_lock(&tp->lock);
4798         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4799         spin_unlock(&tp->lock);
4800 }
4801
4802 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4803 {
4804         /* Tell compiler to fetch tx indices from memory. */
4805         barrier();
4806         return tnapi->tx_pending -
4807                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4808 }
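
/* Illustrative sketch (not used by the driver): the free-slot math in
 * tg3_tx_avail() relies on the ring size being a power of two, so the
 * (prod - cons) difference masks correctly even after the indices
 * wrap.  E.g. for a 512-entry ring, prod = 5 and cons = 510 gives
 * (5 - 510) & 511 = 7 descriptors in flight.
 */
static inline u32 tg3_example_ring_in_flight(u32 prod, u32 cons, u32 size)
{
        /* 'size' must be a power of two for the mask trick to work */
        return (prod - cons) & (size - 1);
}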
4809
4810 /* Tigon3 never reports partial packet sends.  So we do not
4811  * need special logic to handle SKBs that have not had all
4812  * of their frags sent yet, like SunGEM does.
4813  */
4814 static void tg3_tx(struct tg3_napi *tnapi)
4815 {
4816         struct tg3 *tp = tnapi->tp;
4817         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4818         u32 sw_idx = tnapi->tx_cons;
4819         struct netdev_queue *txq;
4820         int index = tnapi - tp->napi;
4821
4822         if (tg3_flag(tp, ENABLE_TSS))
4823                 index--;
4824
4825         txq = netdev_get_tx_queue(tp->dev, index);
4826
4827         while (sw_idx != hw_idx) {
4828                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4829                 struct sk_buff *skb = ri->skb;
4830                 int i, tx_bug = 0;
4831
4832                 if (unlikely(skb == NULL)) {
4833                         tg3_tx_recover(tp);
4834                         return;
4835                 }
4836
4837                 pci_unmap_single(tp->pdev,
4838                                  dma_unmap_addr(ri, mapping),
4839                                  skb_headlen(skb),
4840                                  PCI_DMA_TODEVICE);
4841
4842                 ri->skb = NULL;
4843
4844                 while (ri->fragmented) {
4845                         ri->fragmented = false;
4846                         sw_idx = NEXT_TX(sw_idx);
4847                         ri = &tnapi->tx_buffers[sw_idx];
4848                 }
4849
4850                 sw_idx = NEXT_TX(sw_idx);
4851
4852                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4853                         ri = &tnapi->tx_buffers[sw_idx];
4854                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4855                                 tx_bug = 1;
4856
4857                         pci_unmap_page(tp->pdev,
4858                                        dma_unmap_addr(ri, mapping),
4859                                        skb_shinfo(skb)->frags[i].size,
4860                                        PCI_DMA_TODEVICE);
4861
4862                         while (ri->fragmented) {
4863                                 ri->fragmented = false;
4864                                 sw_idx = NEXT_TX(sw_idx);
4865                                 ri = &tnapi->tx_buffers[sw_idx];
4866                         }
4867
4868                         sw_idx = NEXT_TX(sw_idx);
4869                 }
4870
4871                 dev_kfree_skb(skb);
4872
4873                 if (unlikely(tx_bug)) {
4874                         tg3_tx_recover(tp);
4875                         return;
4876                 }
4877         }
4878
4879         tnapi->tx_cons = sw_idx;
4880
4881         /* Need to make the tx_cons update visible to tg3_start_xmit()
4882          * before checking for netif_queue_stopped().  Without the
4883          * memory barrier, there is a small possibility that tg3_start_xmit()
4884          * will miss it and cause the queue to be stopped forever.
4885          */
4886         smp_mb();
4887
4888         if (unlikely(netif_tx_queue_stopped(txq) &&
4889                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4890                 __netif_tx_lock(txq, smp_processor_id());
4891                 if (netif_tx_queue_stopped(txq) &&
4892                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4893                         netif_tx_wake_queue(txq);
4894                 __netif_tx_unlock(txq);
4895         }
4896 }
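
/* Illustrative note (not driver code): the smp_mb() in tg3_tx() above
 * pairs with a matching barrier on the transmit side.  In outline:
 *
 *      xmit path:                              completion path:
 *        ring looks full, stop queue             tnapi->tx_cons = sw_idx;
 *        smp_mb();                               smp_mb();
 *        re-check tg3_tx_avail();                if queue stopped and space
 *        wake queue if space appeared            available, wake queue
 *
 * Each side publishes its update before reading the other side's
 * state, so at least one of them observes the free space and the
 * queue cannot remain stopped forever.
 */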
4897
4898 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4899 {
4900         if (!ri->skb)
4901                 return;
4902
4903         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4904                          map_sz, PCI_DMA_FROMDEVICE);
4905         dev_kfree_skb_any(ri->skb);
4906         ri->skb = NULL;
4907 }
4908
4909 /* Returns size of skb allocated or < 0 on error.
4910  *
4911  * We only need to fill in the address because the other members
4912  * of the RX descriptor are invariant, see tg3_init_rings.
4913  *
4914  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4915  * posting buffers we only dirty the first cache line of the RX
4916  * descriptor (containing the address).  Whereas for the RX status
4917  * buffers the cpu only reads the last cacheline of the RX descriptor
4918  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4919  */
4920 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4921                             u32 opaque_key, u32 dest_idx_unmasked)
4922 {
4923         struct tg3_rx_buffer_desc *desc;
4924         struct ring_info *map;
4925         struct sk_buff *skb;
4926         dma_addr_t mapping;
4927         int skb_size, dest_idx;
4928
4929         switch (opaque_key) {
4930         case RXD_OPAQUE_RING_STD:
4931                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4932                 desc = &tpr->rx_std[dest_idx];
4933                 map = &tpr->rx_std_buffers[dest_idx];
4934                 skb_size = tp->rx_pkt_map_sz;
4935                 break;
4936
4937         case RXD_OPAQUE_RING_JUMBO:
4938                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4939                 desc = &tpr->rx_jmb[dest_idx].std;
4940                 map = &tpr->rx_jmb_buffers[dest_idx];
4941                 skb_size = TG3_RX_JMB_MAP_SZ;
4942                 break;
4943
4944         default:
4945                 return -EINVAL;
4946         }
4947
4948         /* Do not overwrite any of the map or ring information
4949          * until we are sure we can commit to a new buffer.
4950          *
4951          * Callers depend upon this behavior and assume that
4952          * we leave everything unchanged if we fail.
4953          */
4954         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4955         if (skb == NULL)
4956                 return -ENOMEM;
4957
4958         skb_reserve(skb, tp->rx_offset);
4959
4960         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4961                                  PCI_DMA_FROMDEVICE);
4962         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4963                 dev_kfree_skb(skb);
4964                 return -EIO;
4965         }
4966
4967         map->skb = skb;
4968         dma_unmap_addr_set(map, mapping, mapping);
4969
4970         desc->addr_hi = ((u64)mapping >> 32);
4971         desc->addr_lo = ((u64)mapping & 0xffffffff);
4972
4973         return skb_size;
4974 }
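
/* Illustrative sketch (not used by the driver): Tigon3 RX buffer
 * descriptors carry the DMA address as two 32-bit halves, which is why
 * tg3_alloc_rx_skb() splits the mapping as above.
 */
static inline void tg3_example_set_desc_addr(struct tg3_rx_buffer_desc *desc,
                                             dma_addr_t mapping)
{
        desc->addr_hi = ((u64)mapping >> 32);           /* upper 32 bits */
        desc->addr_lo = ((u64)mapping & 0xffffffff);    /* lower 32 bits */
}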
4975
4976 /* We only need to move over in the address because the other
4977  * members of the RX descriptor are invariant.  See notes above
4978  * tg3_alloc_rx_skb for full details.
4979  */
4980 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4981                            struct tg3_rx_prodring_set *dpr,
4982                            u32 opaque_key, int src_idx,
4983                            u32 dest_idx_unmasked)
4984 {
4985         struct tg3 *tp = tnapi->tp;
4986         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4987         struct ring_info *src_map, *dest_map;
4988         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4989         int dest_idx;
4990
4991         switch (opaque_key) {
4992         case RXD_OPAQUE_RING_STD:
4993                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4994                 dest_desc = &dpr->rx_std[dest_idx];
4995                 dest_map = &dpr->rx_std_buffers[dest_idx];
4996                 src_desc = &spr->rx_std[src_idx];
4997                 src_map = &spr->rx_std_buffers[src_idx];
4998                 break;
4999
5000         case RXD_OPAQUE_RING_JUMBO:
5001                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5002                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5003                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5004                 src_desc = &spr->rx_jmb[src_idx].std;
5005                 src_map = &spr->rx_jmb_buffers[src_idx];
5006                 break;
5007
5008         default:
5009                 return;
5010         }
5011
5012         dest_map->skb = src_map->skb;
5013         dma_unmap_addr_set(dest_map, mapping,
5014                            dma_unmap_addr(src_map, mapping));
5015         dest_desc->addr_hi = src_desc->addr_hi;
5016         dest_desc->addr_lo = src_desc->addr_lo;
5017
5018         /* Ensure that the update to the skb happens after the physical
5019          * addresses have been transferred to the new BD location.
5020          */
5021         smp_wmb();
5022
5023         src_map->skb = NULL;
5024 }
5025
5026 /* The RX ring scheme is composed of multiple rings which post fresh
5027  * buffers to the chip, and one special ring the chip uses to report
5028  * status back to the host.
5029  *
5030  * The special ring reports the status of received packets to the
5031  * host.  The chip does not write into the original descriptor the
5032  * RX buffer was obtained from.  The chip simply takes the original
5033  * descriptor as provided by the host, updates the status and length
5034  * field, then writes this into the next status ring entry.
5035  *
5036  * Each ring the host uses to post buffers to the chip is described
5037  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5038  * it is first placed into the on-chip ram.  When the packet's length
5039  * is known, it walks down the TG3_BDINFO entries to select the ring.
5040  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5041  * whose MAXLEN covers the new packet's length is chosen.
5042  *
5043  * The "separate ring for rx status" scheme may seem odd, but it makes
5044  * sense from a cache coherency perspective.  If only the host writes
5045  * to the buffer post rings, and only the chip writes to the rx status
5046  * rings, then cache lines never move beyond shared-modified state.
5047  * If both the host and chip were to write into the same ring, cache line
5048  * eviction could occur since both entities want it in an exclusive state.
5049  */
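
/* Illustrative sketch (not driver code): the basic shape of consuming
 * the status ring described above.  'hw_idx' is published by the chip;
 * the rmb() orders the index read before the descriptor reads it
 * covers, exactly as in tg3_rx() below (process() is a stand-in name):
 *
 *      hw_idx = *(tnapi->rx_rcb_prod_idx);
 *      rmb();
 *      while (sw_idx != hw_idx) {
 *              process(&tnapi->rx_rcb[sw_idx]);
 *              sw_idx = (sw_idx + 1) & tp->rx_ret_ring_mask;
 *      }
 */
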
5050 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5051 {
5052         struct tg3 *tp = tnapi->tp;
5053         u32 work_mask, rx_std_posted = 0;
5054         u32 std_prod_idx, jmb_prod_idx;
5055         u32 sw_idx = tnapi->rx_rcb_ptr;
5056         u16 hw_idx;
5057         int received;
5058         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5059
5060         hw_idx = *(tnapi->rx_rcb_prod_idx);
5061         /*
5062          * We need to order the read of hw_idx and the read of
5063          * the opaque cookie.
5064          */
5065         rmb();
5066         work_mask = 0;
5067         received = 0;
5068         std_prod_idx = tpr->rx_std_prod_idx;
5069         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5070         while (sw_idx != hw_idx && budget > 0) {
5071                 struct ring_info *ri;
5072                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5073                 unsigned int len;
5074                 struct sk_buff *skb;
5075                 dma_addr_t dma_addr;
5076                 u32 opaque_key, desc_idx, *post_ptr;
5077
5078                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5079                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5080                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5081                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5082                         dma_addr = dma_unmap_addr(ri, mapping);
5083                         skb = ri->skb;
5084                         post_ptr = &std_prod_idx;
5085                         rx_std_posted++;
5086                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5087                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5088                         dma_addr = dma_unmap_addr(ri, mapping);
5089                         skb = ri->skb;
5090                         post_ptr = &jmb_prod_idx;
5091                 } else
5092                         goto next_pkt_nopost;
5093
5094                 work_mask |= opaque_key;
5095
5096                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5097                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5098                 drop_it:
5099                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5100                                        desc_idx, *post_ptr);
5101                 drop_it_no_recycle:
5102                         /* Other statistics are tracked by the card. */
5103                         tp->rx_dropped++;
5104                         goto next_pkt;
5105                 }
5106
5107                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5108                       ETH_FCS_LEN;
5109
5110                 if (len > TG3_RX_COPY_THRESH(tp)) {
5111                         int skb_size;
5112
5113                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5114                                                     *post_ptr);
5115                         if (skb_size < 0)
5116                                 goto drop_it;
5117
5118                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5119                                          PCI_DMA_FROMDEVICE);
5120
5121                         /* Ensure that the update to the skb happens
5122                          * after the usage of the old DMA mapping.
5123                          */
5124                         smp_wmb();
5125
5126                         ri->skb = NULL;
5127
5128                         skb_put(skb, len);
5129                 } else {
5130                         struct sk_buff *copy_skb;
5131
5132                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5133                                        desc_idx, *post_ptr);
5134
5135                         copy_skb = netdev_alloc_skb(tp->dev, len +
5136                                                     TG3_RAW_IP_ALIGN);
5137                         if (copy_skb == NULL)
5138                                 goto drop_it_no_recycle;
5139
5140                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5141                         skb_put(copy_skb, len);
5142                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5143                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5144                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5145
5146                         /* We'll reuse the original ring buffer. */
5147                         skb = copy_skb;
5148                 }
5149
5150                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5151                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5152                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5153                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5154                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5155                 else
5156                         skb_checksum_none_assert(skb);
5157
5158                 skb->protocol = eth_type_trans(skb, tp->dev);
5159
5160                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5161                     skb->protocol != htons(ETH_P_8021Q)) {
5162                         dev_kfree_skb(skb);
5163                         goto drop_it_no_recycle;
5164                 }
5165
5166                 if (desc->type_flags & RXD_FLAG_VLAN &&
5167                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5168                         __vlan_hwaccel_put_tag(skb,
5169                                                desc->err_vlan & RXD_VLAN_MASK);
5170
5171                 napi_gro_receive(&tnapi->napi, skb);
5172
5173                 received++;
5174                 budget--;
5175
5176 next_pkt:
5177                 (*post_ptr)++;
5178
5179                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5180                         tpr->rx_std_prod_idx = std_prod_idx &
5181                                                tp->rx_std_ring_mask;
5182                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5183                                      tpr->rx_std_prod_idx);
5184                         work_mask &= ~RXD_OPAQUE_RING_STD;
5185                         rx_std_posted = 0;
5186                 }
5187 next_pkt_nopost:
5188                 sw_idx++;
5189                 sw_idx &= tp->rx_ret_ring_mask;
5190
5191                 /* Refresh hw_idx to see if there is new work */
5192                 if (sw_idx == hw_idx) {
5193                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5194                         rmb();
5195                 }
5196         }
5197
5198         /* ACK the status ring. */
5199         tnapi->rx_rcb_ptr = sw_idx;
5200         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5201
5202         /* Refill RX ring(s). */
5203         if (!tg3_flag(tp, ENABLE_RSS)) {
5204                 if (work_mask & RXD_OPAQUE_RING_STD) {
5205                         tpr->rx_std_prod_idx = std_prod_idx &
5206                                                tp->rx_std_ring_mask;
5207                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5208                                      tpr->rx_std_prod_idx);
5209                 }
5210                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5211                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5212                                                tp->rx_jmb_ring_mask;
5213                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5214                                      tpr->rx_jmb_prod_idx);
5215                 }
5216                 mmiowb();
5217         } else if (work_mask) {
5218                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5219                  * updated before the producer indices can be updated.
5220                  */
5221                 smp_wmb();
5222
5223                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5224                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5225
5226                 if (tnapi != &tp->napi[1])
5227                         napi_schedule(&tp->napi[1].napi);
5228         }
5229
5230         return received;
5231 }
5232
5233 static void tg3_poll_link(struct tg3 *tp)
5234 {
5235         /* handle link change and other phy events */
5236         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5237                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5238
5239                 if (sblk->status & SD_STATUS_LINK_CHG) {
5240                         sblk->status = SD_STATUS_UPDATED |
5241                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5242                         spin_lock(&tp->lock);
5243                         if (tg3_flag(tp, USE_PHYLIB)) {
5244                                 tw32_f(MAC_STATUS,
5245                                      (MAC_STATUS_SYNC_CHANGED |
5246                                       MAC_STATUS_CFG_CHANGED |
5247                                       MAC_STATUS_MI_COMPLETION |
5248                                       MAC_STATUS_LNKSTATE_CHANGED));
5249                                 udelay(40);
5250                         } else
5251                                 tg3_setup_phy(tp, 0);
5252                         spin_unlock(&tp->lock);
5253                 }
5254         }
5255 }
5256
5257 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5258                                 struct tg3_rx_prodring_set *dpr,
5259                                 struct tg3_rx_prodring_set *spr)
5260 {
5261         u32 si, di, cpycnt, src_prod_idx;
5262         int i, err = 0;
5263
5264         while (1) {
5265                 src_prod_idx = spr->rx_std_prod_idx;
5266
5267                 /* Make sure updates to the rx_std_buffers[] entries and the
5268                  * standard producer index are seen in the correct order.
5269                  */
5270                 smp_rmb();
5271
5272                 if (spr->rx_std_cons_idx == src_prod_idx)
5273                         break;
5274
5275                 if (spr->rx_std_cons_idx < src_prod_idx)
5276                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5277                 else
5278                         cpycnt = tp->rx_std_ring_mask + 1 -
5279                                  spr->rx_std_cons_idx;
5280
5281                 cpycnt = min(cpycnt,
5282                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5283
5284                 si = spr->rx_std_cons_idx;
5285                 di = dpr->rx_std_prod_idx;
5286
5287                 for (i = di; i < di + cpycnt; i++) {
5288                         if (dpr->rx_std_buffers[i].skb) {
5289                                 cpycnt = i - di;
5290                                 err = -ENOSPC;
5291                                 break;
5292                         }
5293                 }
5294
5295                 if (!cpycnt)
5296                         break;
5297
5298                 /* Ensure that updates to the rx_std_buffers ring and the
5299                  * shadowed hardware producer ring from tg3_recycle_skb() are
5300                  * ordered correctly WRT the skb check above.
5301                  */
5302                 smp_rmb();
5303
5304                 memcpy(&dpr->rx_std_buffers[di],
5305                        &spr->rx_std_buffers[si],
5306                        cpycnt * sizeof(struct ring_info));
5307
5308                 for (i = 0; i < cpycnt; i++, di++, si++) {
5309                         struct tg3_rx_buffer_desc *sbd, *dbd;
5310                         sbd = &spr->rx_std[si];
5311                         dbd = &dpr->rx_std[di];
5312                         dbd->addr_hi = sbd->addr_hi;
5313                         dbd->addr_lo = sbd->addr_lo;
5314                 }
5315
5316                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5317                                        tp->rx_std_ring_mask;
5318                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5319                                        tp->rx_std_ring_mask;
5320         }
5321
5322         while (1) {
5323                 src_prod_idx = spr->rx_jmb_prod_idx;
5324
5325                 /* Make sure updates to the rx_jmb_buffers[] entries and
5326                  * the jumbo producer index are seen in the correct order.
5327                  */
5328                 smp_rmb();
5329
5330                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5331                         break;
5332
5333                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5334                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5335                 else
5336                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5337                                  spr->rx_jmb_cons_idx;
5338
5339                 cpycnt = min(cpycnt,
5340                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5341
5342                 si = spr->rx_jmb_cons_idx;
5343                 di = dpr->rx_jmb_prod_idx;
5344
5345                 for (i = di; i < di + cpycnt; i++) {
5346                         if (dpr->rx_jmb_buffers[i].skb) {
5347                                 cpycnt = i - di;
5348                                 err = -ENOSPC;
5349                                 break;
5350                         }
5351                 }
5352
5353                 if (!cpycnt)
5354                         break;
5355
5356                 /* Ensure that updates to the rx_jmb_buffers ring and the
5357                  * shadowed hardware producer ring from tg3_recycle_skb() are
5358                  * ordered correctly WRT the skb check above.
5359                  */
5360                 smp_rmb();
5361
5362                 memcpy(&dpr->rx_jmb_buffers[di],
5363                        &spr->rx_jmb_buffers[si],
5364                        cpycnt * sizeof(struct ring_info));
5365
5366                 for (i = 0; i < cpycnt; i++, di++, si++) {
5367                         struct tg3_rx_buffer_desc *sbd, *dbd;
5368                         sbd = &spr->rx_jmb[si].std;
5369                         dbd = &dpr->rx_jmb[di].std;
5370                         dbd->addr_hi = sbd->addr_hi;
5371                         dbd->addr_lo = sbd->addr_lo;
5372                 }
5373
5374                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5375                                        tp->rx_jmb_ring_mask;
5376                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5377                                        tp->rx_jmb_ring_mask;
5378         }
5379
5380         return err;
5381 }
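
/* Illustrative sketch (not used by the driver): the cpycnt computation
 * above copies a circular ring in at most two contiguous chunks.  The
 * producer-to-consumer distance is first clamped at the end of the
 * source ring, then at the end of the destination ring.
 */
static inline u32 tg3_example_contig_chunk(u32 cons, u32 prod, u32 mask,
                                           u32 dst_prod)
{
        u32 cpycnt;

        if (cons < prod)
                cpycnt = prod - cons;           /* no wrap in the source */
        else
                cpycnt = mask + 1 - cons;       /* up to end of source ring */

        /* also stop at the end of the destination ring */
        return min(cpycnt, mask + 1 - dst_prod);
}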
5382
5383 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5384 {
5385         struct tg3 *tp = tnapi->tp;
5386
5387         /* run TX completion thread */
5388         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5389                 tg3_tx(tnapi);
5390                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5391                         return work_done;
5392         }
5393
5394         /* run RX thread, within the bounds set by NAPI.
5395          * All RX "locking" is done by ensuring outside
5396          * code synchronizes with tg3->napi.poll()
5397          */
5398         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5399                 work_done += tg3_rx(tnapi, budget - work_done);
5400
5401         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5402                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5403                 int i, err = 0;
5404                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5405                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5406
5407                 for (i = 1; i < tp->irq_cnt; i++)
5408                         err |= tg3_rx_prodring_xfer(tp, dpr,
5409                                                     &tp->napi[i].prodring);
5410
5411                 wmb();
5412
5413                 if (std_prod_idx != dpr->rx_std_prod_idx)
5414                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5415                                      dpr->rx_std_prod_idx);
5416
5417                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5418                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5419                                      dpr->rx_jmb_prod_idx);
5420
5421                 mmiowb();
5422
5423                 if (err)
5424                         tw32_f(HOSTCC_MODE, tp->coal_now);
5425         }
5426
5427         return work_done;
5428 }
5429
5430 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5431 {
5432         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5433         struct tg3 *tp = tnapi->tp;
5434         int work_done = 0;
5435         struct tg3_hw_status *sblk = tnapi->hw_status;
5436
5437         while (1) {
5438                 work_done = tg3_poll_work(tnapi, work_done, budget);
5439
5440                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5441                         goto tx_recovery;
5442
5443                 if (unlikely(work_done >= budget))
5444                         break;
5445
5446                 /* tnapi->last_tag is used when reenabling interrupts below
5447                  * to tell the hw how much work has been processed,
5448                  * so we must read it before checking for more work.
5449                  */
5450                 tnapi->last_tag = sblk->status_tag;
5451                 tnapi->last_irq_tag = tnapi->last_tag;
5452                 rmb();
5453
5454                 /* check for RX/TX work to do */
5455                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5456                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5457                         napi_complete(napi);
5458                         /* Reenable interrupts. */
5459                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5460                         mmiowb();
5461                         break;
5462                 }
5463         }
5464
5465         return work_done;
5466
5467 tx_recovery:
5468         /* work_done is guaranteed to be less than budget. */
5469         napi_complete(napi);
5470         schedule_work(&tp->reset_task);
5471         return work_done;
5472 }
5473
5474 static void tg3_process_error(struct tg3 *tp)
5475 {
5476         u32 val;
5477         bool real_error = false;
5478
5479         if (tg3_flag(tp, ERROR_PROCESSED))
5480                 return;
5481
5482         /* Check Flow Attention register */
5483         val = tr32(HOSTCC_FLOW_ATTN);
5484         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5485                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5486                 real_error = true;
5487         }
5488
5489         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5490                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5491                 real_error = true;
5492         }
5493
5494         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5495                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5496                 real_error = true;
5497         }
5498
5499         if (!real_error)
5500                 return;
5501
5502         tg3_dump_state(tp);
5503
5504         tg3_flag_set(tp, ERROR_PROCESSED);
5505         schedule_work(&tp->reset_task);
5506 }
5507
5508 static int tg3_poll(struct napi_struct *napi, int budget)
5509 {
5510         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5511         struct tg3 *tp = tnapi->tp;
5512         int work_done = 0;
5513         struct tg3_hw_status *sblk = tnapi->hw_status;
5514
5515         while (1) {
5516                 if (sblk->status & SD_STATUS_ERROR)
5517                         tg3_process_error(tp);
5518
5519                 tg3_poll_link(tp);
5520
5521                 work_done = tg3_poll_work(tnapi, work_done, budget);
5522
5523                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5524                         goto tx_recovery;
5525
5526                 if (unlikely(work_done >= budget))
5527                         break;
5528
5529                 if (tg3_flag(tp, TAGGED_STATUS)) {
5530                         /* tnapi->last_tag is used in tg3_int_reenable() below
5531                          * to tell the hw how much work has been processed,
5532                          * so we must read it before checking for more work.
5533                          */
5534                         tnapi->last_tag = sblk->status_tag;
5535                         tnapi->last_irq_tag = tnapi->last_tag;
5536                         rmb();
5537                 } else
5538                         sblk->status &= ~SD_STATUS_UPDATED;
5539
5540                 if (likely(!tg3_has_work(tnapi))) {
5541                         napi_complete(napi);
5542                         tg3_int_reenable(tnapi);
5543                         break;
5544                 }
5545         }
5546
5547         return work_done;
5548
5549 tx_recovery:
5550         /* work_done is guaranteed to be less than budget. */
5551         napi_complete(napi);
5552         schedule_work(&tp->reset_task);
5553         return work_done;
5554 }
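
/* Illustrative note (not driver code): tg3_poll() and tg3_poll_msix()
 * above both follow the standard NAPI polling contract.  In outline
 * (do_work(), more_work_pending() and reenable_ints() are stand-in
 * names):
 *
 *      work = 0;
 *      while (1) {
 *              work = do_work(work, budget);
 *              if (work >= budget)
 *                      return work;            // stay scheduled
 *              if (!more_work_pending()) {
 *                      napi_complete(napi);
 *                      reenable_ints();        // hand irqs back to hw
 *                      return work;
 *              }
 *      }
 *
 * Returning a value equal to the budget keeps the poll routine
 * scheduled; completing with work < budget re-arms interrupts.
 */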
5555
5556 static void tg3_napi_disable(struct tg3 *tp)
5557 {
5558         int i;
5559
5560         for (i = tp->irq_cnt - 1; i >= 0; i--)
5561                 napi_disable(&tp->napi[i].napi);
5562 }
5563
5564 static void tg3_napi_enable(struct tg3 *tp)
5565 {
5566         int i;
5567
5568         for (i = 0; i < tp->irq_cnt; i++)
5569                 napi_enable(&tp->napi[i].napi);
5570 }
5571
5572 static void tg3_napi_init(struct tg3 *tp)
5573 {
5574         int i;
5575
5576         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5577         for (i = 1; i < tp->irq_cnt; i++)
5578                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5579 }
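
/* Note: 64 above is the conventional NAPI weight (later kernels name it
 * NAPI_POLL_WEIGHT); it caps how many packets a single tg3_poll() or
 * tg3_poll_msix() invocation may process before yielding the CPU.
 */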
5580
5581 static void tg3_napi_fini(struct tg3 *tp)
5582 {
5583         int i;
5584
5585         for (i = 0; i < tp->irq_cnt; i++)
5586                 netif_napi_del(&tp->napi[i].napi);
5587 }
5588
5589 static inline void tg3_netif_stop(struct tg3 *tp)
5590 {
5591         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5592         tg3_napi_disable(tp);
5593         netif_tx_disable(tp->dev);
5594 }
5595
5596 static inline void tg3_netif_start(struct tg3 *tp)
5597 {
5598         /* NOTE: unconditional netif_tx_wake_all_queues is only
5599          * appropriate so long as all callers are assured to
5600          * have free tx slots (such as after tg3_init_hw)
5601          */
5602         netif_tx_wake_all_queues(tp->dev);
5603
5604         tg3_napi_enable(tp);
5605         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5606         tg3_enable_ints(tp);
5607 }
5608
5609 static void tg3_irq_quiesce(struct tg3 *tp)
5610 {
5611         int i;
5612
5613         BUG_ON(tp->irq_sync);
5614
5615         tp->irq_sync = 1;
5616         smp_mb();
5617
5618         for (i = 0; i < tp->irq_cnt; i++)
5619                 synchronize_irq(tp->napi[i].irq_vec);
5620 }
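
/* The smp_mb() above pairs with the tg3_irq_sync() test at the top of
 * the interrupt handlers below: once irq_sync is visibly set, a handler
 * that fires anyway returns without scheduling NAPI, and
 * synchronize_irq() waits out any handlers already in flight.
 */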
5621
5622 /* Fully shut down all tg3 driver activity elsewhere in the system.
5623  * If irq_sync is non-zero, the IRQ handlers must be synchronized with
5624  * as well.  Most of the time this is only necessary when shutting
5625  * down the device.
5626  */
5627 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5628 {
5629         spin_lock_bh(&tp->lock);
5630         if (irq_sync)
5631                 tg3_irq_quiesce(tp);
5632 }
5633
5634 static inline void tg3_full_unlock(struct tg3 *tp)
5635 {
5636         spin_unlock_bh(&tp->lock);
5637 }
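
/* Illustrative usage of the pair above (see tg3_reset_task() and
 * tg3_change_mtu() below for real callers); pass irq_sync=1 whenever
 * the hardware is about to be reconfigured under the lock:
 *
 *	tg3_full_lock(tp, 1);	(also runs tg3_irq_quiesce())
 *	... halt and reprogram the chip ...
 *	tg3_full_unlock(tp);
 */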
5638
5639 /* One-shot MSI handler - the chip automatically disables the interrupt
5640  * after sending the MSI, so the driver doesn't have to do it.
5641  */
5642 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5643 {
5644         struct tg3_napi *tnapi = dev_id;
5645         struct tg3 *tp = tnapi->tp;
5646
5647         prefetch(tnapi->hw_status);
5648         if (tnapi->rx_rcb)
5649                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5650
5651         if (likely(!tg3_irq_sync(tp)))
5652                 napi_schedule(&tnapi->napi);
5653
5654         return IRQ_HANDLED;
5655 }
5656
5657 /* MSI ISR - No need to check for interrupt sharing and no need to
5658  * flush status block and interrupt mailbox. PCI ordering rules
5659  * guarantee that MSI will arrive after the status block.
5660  */
5661 static irqreturn_t tg3_msi(int irq, void *dev_id)
5662 {
5663         struct tg3_napi *tnapi = dev_id;
5664         struct tg3 *tp = tnapi->tp;
5665
5666         prefetch(tnapi->hw_status);
5667         if (tnapi->rx_rcb)
5668                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5669         /*
5670          * Writing any value to intr-mbox-0 clears PCI INTA# and
5671          * chip-internal interrupt pending events.
5672          * Writing a non-zero value to intr-mbox-0 additionally tells
5673          * the NIC to stop sending us irqs, engaging "in-intr-handler"
5674          * event coalescing.
5675          */
5676         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5677         if (likely(!tg3_irq_sync(tp)))
5678                 napi_schedule(&tnapi->napi);
5679
5680         return IRQ_RETVAL(1);
5681 }
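
/* Mailbox semantics in brief: the 0x00000001 written above masks further
 * interrupts until polling completes; tg3_poll() later re-arms them via
 * tg3_int_reenable(), which writes tnapi->last_tag << 24 back to the
 * interrupt mailbox (the same write visible in the MSI-X poll path at
 * the top of this section).
 */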
5682
5683 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5684 {
5685         struct tg3_napi *tnapi = dev_id;
5686         struct tg3 *tp = tnapi->tp;
5687         struct tg3_hw_status *sblk = tnapi->hw_status;
5688         unsigned int handled = 1;
5689
5690         /* In INTx mode, the interrupt can arrive at the CPU before the
5691          * status block that was posted prior to the interrupt has landed
5692          * in host memory.  Reading the PCI State register will confirm
5693          * whether the interrupt is ours and will flush the status block.
5694          */
5695         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5696                 if (tg3_flag(tp, CHIP_RESETTING) ||
5697                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5698                         handled = 0;
5699                         goto out;
5700                 }
5701         }
5702
5703         /*
5704          * Writing any value to intr-mbox-0 clears PCI INTA# and
5705          * chip-internal interrupt pending events.
5706          * Writing a non-zero value to intr-mbox-0 additionally tells the
5707          * NIC to stop sending us irqs, engaging "in-intr-handler"
5708          * event coalescing.
5709          *
5710          * Flush the mailbox to de-assert the IRQ immediately to prevent
5711          * spurious interrupts.  The flush impacts performance but
5712          * excessive spurious interrupts can be worse in some cases.
5713          */
5714         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5715         if (tg3_irq_sync(tp))
5716                 goto out;
5717         sblk->status &= ~SD_STATUS_UPDATED;
5718         if (likely(tg3_has_work(tnapi))) {
5719                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5720                 napi_schedule(&tnapi->napi);
5721         } else {
5722                 /* No work, shared interrupt perhaps?  Re-enable
5723                  * interrupts, and flush that PCI write.
5724                  */
5725                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5726                                0x00000000);
5727         }
5728 out:
5729         return IRQ_RETVAL(handled);
5730 }
5731
5732 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5733 {
5734         struct tg3_napi *tnapi = dev_id;
5735         struct tg3 *tp = tnapi->tp;
5736         struct tg3_hw_status *sblk = tnapi->hw_status;
5737         unsigned int handled = 1;
5738
5739         /* In INTx mode, the interrupt can arrive at the CPU before the
5740          * status block that was posted prior to the interrupt has landed
5741          * in host memory.  Reading the PCI State register will confirm
5742          * whether the interrupt is ours and will flush the status block.
5743          */
5744         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5745                 if (tg3_flag(tp, CHIP_RESETTING) ||
5746                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5747                         handled = 0;
5748                         goto out;
5749                 }
5750         }
5751
5752         /*
5753          * Writing any value to intr-mbox-0 clears PCI INTA# and
5754          * chip-internal interrupt pending events.
5755          * Writing a non-zero value to intr-mbox-0 additionally tells the
5756          * NIC to stop sending us irqs, engaging "in-intr-handler"
5757          * event coalescing.
5758          *
5759          * Flush the mailbox to de-assert the IRQ immediately to prevent
5760          * spurious interrupts.  The flush impacts performance but
5761          * excessive spurious interrupts can be worse in some cases.
5762          */
5763         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5764
5765         /*
5766          * In a shared interrupt configuration, sometimes other devices'
5767          * interrupts will scream.  We record the current status tag here
5768          * so that the above check can report that the screaming interrupts
5769          * are unhandled.  Eventually they will be silenced.
5770          */
5771         tnapi->last_irq_tag = sblk->status_tag;
5772
5773         if (tg3_irq_sync(tp))
5774                 goto out;
5775
5776         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5777
5778         napi_schedule(&tnapi->napi);
5779
5780 out:
5781         return IRQ_RETVAL(handled);
5782 }
5783
5784 /* ISR for interrupt test */
5785 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5786 {
5787         struct tg3_napi *tnapi = dev_id;
5788         struct tg3 *tp = tnapi->tp;
5789         struct tg3_hw_status *sblk = tnapi->hw_status;
5790
5791         if ((sblk->status & SD_STATUS_UPDATED) ||
5792             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5793                 tg3_disable_ints(tp);
5794                 return IRQ_RETVAL(1);
5795         }
5796         return IRQ_RETVAL(0);
5797 }
5798
5799 static int tg3_init_hw(struct tg3 *, int);
5800 static int tg3_halt(struct tg3 *, int, int);
5801
5802 /* Restart hardware after configuration changes, self-test, etc.
5803  * Invoked with tp->lock held.
5804  */
5805 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5806         __releases(tp->lock)
5807         __acquires(tp->lock)
5808 {
5809         int err;
5810
5811         err = tg3_init_hw(tp, reset_phy);
5812         if (err) {
5813                 netdev_err(tp->dev,
5814                            "Failed to re-initialize device, aborting\n");
5815                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5816                 tg3_full_unlock(tp);
5817                 del_timer_sync(&tp->timer);
5818                 tp->irq_sync = 0;
5819                 tg3_napi_enable(tp);
5820                 dev_close(tp->dev);
5821                 tg3_full_lock(tp, 0);
5822         }
5823         return err;
5824 }
5825
5826 #ifdef CONFIG_NET_POLL_CONTROLLER
5827 static void tg3_poll_controller(struct net_device *dev)
5828 {
5829         int i;
5830         struct tg3 *tp = netdev_priv(dev);
5831
5832         for (i = 0; i < tp->irq_cnt; i++)
5833                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5834 }
5835 #endif
5836
5837 static void tg3_reset_task(struct work_struct *work)
5838 {
5839         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5840         int err;
5841         unsigned int restart_timer;
5842
5843         tg3_full_lock(tp, 0);
5844
5845         if (!netif_running(tp->dev)) {
5846                 tg3_full_unlock(tp);
5847                 return;
5848         }
5849
5850         tg3_full_unlock(tp);
5851
5852         tg3_phy_stop(tp);
5853
5854         tg3_netif_stop(tp);
5855
5856         tg3_full_lock(tp, 1);
5857
5858         restart_timer = tg3_flag(tp, RESTART_TIMER);
5859         tg3_flag_clear(tp, RESTART_TIMER);
5860
5861         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5862                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5863                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5864                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5865                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5866         }
5867
5868         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5869         err = tg3_init_hw(tp, 1);
5870         if (err)
5871                 goto out;
5872
5873         tg3_netif_start(tp);
5874
5875         if (restart_timer)
5876                 mod_timer(&tp->timer, jiffies + 1);
5877
5878 out:
5879         tg3_full_unlock(tp);
5880
5881         if (!err)
5882                 tg3_phy_start(tp);
5883 }
5884
5885 static void tg3_tx_timeout(struct net_device *dev)
5886 {
5887         struct tg3 *tp = netdev_priv(dev);
5888
5889         if (netif_msg_tx_err(tp)) {
5890                 netdev_err(dev, "transmit timed out, resetting\n");
5891                 tg3_dump_state(tp);
5892         }
5893
5894         schedule_work(&tp->reset_task);
5895 }
5896
5897 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5898 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5899 {
5900         u32 base = (u32) mapping & 0xffffffff;
5901
5902         return (base > 0xffffdcc0) && (base + len + 8 < base);
5903 }
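
/* Worked example of the test above (illustrative): with
 * mapping = 0xffffff00 and len = 0x1000, base + len + 8 computes to
 * 0x100000f08, which truncates to 0xf08 in 32 bits, smaller than base,
 * so the buffer straddles a 4GB boundary and must be bounced.  The
 * base > 0xffffdcc0 pre-check (4GB minus 9024, presumably sized to the
 * largest frame plus slack) cheaply skips buffers that end well short
 * of the boundary.
 */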
5904
5905 /* Test for DMA addresses > 40-bit */
5906 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5907                                           int len)
5908 {
5909 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5910         if (tg3_flag(tp, 40BIT_DMA_BUG))
5911                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5912         return 0;
5913 #else
5914         return 0;
5915 #endif
5916 }
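
/* Example (illustrative): DMA_BIT_MASK(40) is 0xffffffffff, so a
 * mapping of 0xffffffff00 with len = 0x1000 trips the test because the
 * buffer runs past the 40-bit address limit these buggy chips can
 * safely DMA to.
 */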
5917
5918 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
5919                                  dma_addr_t mapping, u32 len, u32 flags,
5920                                  u32 mss, u32 vlan)
5921 {
5922         txbd->addr_hi = ((u64) mapping >> 32);
5923         txbd->addr_lo = ((u64) mapping & 0xffffffff);
5924         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5925         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
5926 }
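
/* Descriptor packing example (illustrative, assuming TXD_LEN_SHIFT is
 * the usual 16): a 1514-byte frame with TXD_FLAG_END and no mss/vlan
 * yields
 *
 *	len_flags = (1514 << 16) | TXD_FLAG_END = 0x05ea0000 | flags
 *	vlan_tag  = 0
 *
 * i.e. the length lands in the high 16 bits and the flags in the low 16.
 */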
5927
5928 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
5929                             dma_addr_t map, u32 len, u32 flags,
5930                             u32 mss, u32 vlan)
5931 {
5932         struct tg3 *tp = tnapi->tp;
5933         bool hwbug = false;
5934
5935         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
5936                 hwbug = true;
5937
5938         if (tg3_4g_overflow_test(map, len))
5939                 hwbug = true;
5940
5941         if (tg3_40bit_overflow_test(tp, map, len))
5942                 hwbug = true;
5943
5944         if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
5945                 u32 tmp_flag = flags & ~TXD_FLAG_END;
5946                 while (len > TG3_TX_BD_DMA_MAX) {
5947                         u32 frag_len = TG3_TX_BD_DMA_MAX;
5948                         len -= TG3_TX_BD_DMA_MAX;
5949
5950                         if (len) {
5951                                 tnapi->tx_buffers[*entry].fragmented = true;
5952                                 /* Avoid the 8-byte DMA problem */
5953                                 if (len <= 8) {
5954                                         len += TG3_TX_BD_DMA_MAX / 2;
5955                                         frag_len = TG3_TX_BD_DMA_MAX / 2;
5956                                 }
5957                         } else
5958                                 tmp_flag = flags;
5959
5960                         if (*budget) {
5961                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5962                                               frag_len, tmp_flag, mss, vlan);
5963                                 (*budget)--;
5964                                 *entry = NEXT_TX(*entry);
5965                         } else {
5966                                 hwbug = true;
5967                                 break;
5968                         }
5969
5970                         map += frag_len;
5971                 }
5972
5973                 if (len) {
5974                         if (*budget) {
5975                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5976                                               len, flags, mss, vlan);
5977                                 (*budget)--;
5978                                 *entry = NEXT_TX(*entry);
5979                         } else {
5980                                 hwbug = true;
5981                         }
5982                 }
5983         } else {
5984                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5985                               len, flags, mss, vlan);
5986                 *entry = NEXT_TX(*entry);
5987         }
5988
5989         return hwbug;
5990 }
5991
5992 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
5993 {
5994         int i;
5995         struct sk_buff *skb;
5996         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
5997
5998         skb = txb->skb;
5999         txb->skb = NULL;
6000
6001         pci_unmap_single(tnapi->tp->pdev,
6002                          dma_unmap_addr(txb, mapping),
6003                          skb_headlen(skb),
6004                          PCI_DMA_TODEVICE);
6005
6006         while (txb->fragmented) {
6007                 txb->fragmented = false;
6008                 entry = NEXT_TX(entry);
6009                 txb = &tnapi->tx_buffers[entry];
6010         }
6011
6012         for (i = 0; i < last; i++) {
6013                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6014
6015                 entry = NEXT_TX(entry);
6016                 txb = &tnapi->tx_buffers[entry];
6017
6018                 pci_unmap_page(tnapi->tp->pdev,
6019                                dma_unmap_addr(txb, mapping),
6020                                frag->size, PCI_DMA_TODEVICE);
6021
6022                 while (txb->fragmented) {
6023                         txb->fragmented = false;
6024                         entry = NEXT_TX(entry);
6025                         txb = &tnapi->tx_buffers[entry];
6026                 }
6027         }
6028 }
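
/* The two "while (txb->fragmented)" loops above step over the extra
 * descriptors that tg3_tx_frag_set() queued when splitting a fragment
 * for the 4K_FIFO_LIMIT workaround; those entries share the original
 * DMA mapping and carry no skb of their own, so there is nothing
 * further to unmap for them.
 */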
6029
6030 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6031 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6032                                        struct sk_buff *skb,
6033                                        u32 *entry, u32 *budget,
6034                                        u32 base_flags, u32 mss, u32 vlan)
6035 {
6036         struct tg3 *tp = tnapi->tp;
6037         struct sk_buff *new_skb;
6038         dma_addr_t new_addr = 0;
6039         int ret = 0;
6040
6041         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6042                 new_skb = skb_copy(skb, GFP_ATOMIC);
6043         else {
6044                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6045
6046                 new_skb = skb_copy_expand(skb,
6047                                           skb_headroom(skb) + more_headroom,
6048                                           skb_tailroom(skb), GFP_ATOMIC);
6049         }
6050
6051         if (!new_skb) {
6052                 ret = -1;
6053         } else {
6054                 /* New SKB is guaranteed to be linear. */
6055                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6056                                           PCI_DMA_TODEVICE);
6057                 /* Make sure the mapping succeeded */
6058                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6059                         dev_kfree_skb(new_skb);
6060                         ret = -1;
6061                 } else {
6062                         base_flags |= TXD_FLAG_END;
6063
6064                         tnapi->tx_buffers[*entry].skb = new_skb;
6065                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6066                                            mapping, new_addr);
6067
6068                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6069                                             new_skb->len, base_flags,
6070                                             mss, vlan)) {
6071                                 tg3_tx_skb_unmap(tnapi, *entry, 0);
6072                                 dev_kfree_skb(new_skb);
6073                                 ret = -1;
6074                         }
6075                 }
6076         }
6077
6078         dev_kfree_skb(skb);
6079
6080         return ret;
6081 }
6082
6083 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6084
6085 /* Use GSO to work around a rare TSO bug that may be triggered when the
6086  * TSO header is greater than 80 bytes.
6087  */
6088 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6089 {
6090         struct sk_buff *segs, *nskb;
6091         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6092
6093         /* Estimate the number of fragments in the worst case */
6094         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6095                 netif_stop_queue(tp->dev);
6096
6097                 /* netif_tx_stop_queue() must be done before checking
6098                  * the tx index in tg3_tx_avail() below, because in
6099                  * tg3_tx(), we update tx index before checking for
6100                  * netif_tx_queue_stopped().
6101                  */
6102                 smp_mb();
6103                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6104                         return NETDEV_TX_BUSY;
6105
6106                 netif_wake_queue(tp->dev);
6107         }
6108
6109         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6110         if (IS_ERR(segs))
6111                 goto tg3_tso_bug_end;
6112
6113         do {
6114                 nskb = segs;
6115                 segs = segs->next;
6116                 nskb->next = NULL;
6117                 tg3_start_xmit(nskb, tp->dev);
6118         } while (segs);
6119
6120 tg3_tso_bug_end:
6121         dev_kfree_skb(skb);
6122
6123         return NETDEV_TX_OK;
6124 }
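
/* Rough arithmetic behind frag_cnt_est above (an estimate, not a hard
 * bound): a 64KB TSO skb with an mss of 1448 comes to roughly 45
 * gso_segs, so about 135 descriptors, three per resulting segment, are
 * reserved before skb_gso_segment() is allowed to run.
 */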
6125
6126 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6127  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6128  */
6129 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6130 {
6131         struct tg3 *tp = netdev_priv(dev);
6132         u32 len, entry, base_flags, mss, vlan = 0;
6133         u32 budget;
6134         int i = -1, would_hit_hwbug;
6135         dma_addr_t mapping;
6136         struct tg3_napi *tnapi;
6137         struct netdev_queue *txq;
6138         unsigned int last;
6139
6140         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6141         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6142         if (tg3_flag(tp, ENABLE_TSS))
6143                 tnapi++;
6144
6145         budget = tg3_tx_avail(tnapi);
6146
6147         /* We are running in BH disabled context with netif_tx_lock
6148          * and TX reclaim runs via tp->napi.poll inside of a software
6149          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6150          * no IRQ context deadlocks to worry about either.  Rejoice!
6151          */
6152         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6153                 if (!netif_tx_queue_stopped(txq)) {
6154                         netif_tx_stop_queue(txq);
6155
6156                         /* This is a hard error, log it. */
6157                         netdev_err(dev,
6158                                    "BUG! Tx Ring full when queue awake!\n");
6159                 }
6160                 return NETDEV_TX_BUSY;
6161         }
6162
6163         entry = tnapi->tx_prod;
6164         base_flags = 0;
6165         if (skb->ip_summed == CHECKSUM_PARTIAL)
6166                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6167
6168         mss = skb_shinfo(skb)->gso_size;
6169         if (mss) {
6170                 struct iphdr *iph;
6171                 u32 tcp_opt_len, hdr_len;
6172
6173                 if (skb_header_cloned(skb) &&
6174                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6175                         dev_kfree_skb(skb);
6176                         goto out_unlock;
6177                 }
6178
6179                 iph = ip_hdr(skb);
6180                 tcp_opt_len = tcp_optlen(skb);
6181
6182                 if (skb_is_gso_v6(skb)) {
6183                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6184                 } else {
6185                         u32 ip_tcp_len;
6186
6187                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6188                         hdr_len = ip_tcp_len + tcp_opt_len;
6189
6190                         iph->check = 0;
6191                         iph->tot_len = htons(mss + hdr_len);
6192                 }
6193
6194                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6195                     tg3_flag(tp, TSO_BUG))
6196                         return tg3_tso_bug(tp, skb);
6197
6198                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6199                                TXD_FLAG_CPU_POST_DMA);
6200
6201                 if (tg3_flag(tp, HW_TSO_1) ||
6202                     tg3_flag(tp, HW_TSO_2) ||
6203                     tg3_flag(tp, HW_TSO_3)) {
6204                         tcp_hdr(skb)->check = 0;
6205                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6206                 } else
6207                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6208                                                                  iph->daddr, 0,
6209                                                                  IPPROTO_TCP,
6210                                                                  0);
6211
6212                 if (tg3_flag(tp, HW_TSO_3)) {
6213                         mss |= (hdr_len & 0xc) << 12;
6214                         if (hdr_len & 0x10)
6215                                 base_flags |= 0x00000010;
6216                         base_flags |= (hdr_len & 0x3e0) << 5;
6217                 } else if (tg3_flag(tp, HW_TSO_2))
6218                         mss |= hdr_len << 9;
6219                 else if (tg3_flag(tp, HW_TSO_1) ||
6220                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6221                         if (tcp_opt_len || iph->ihl > 5) {
6222                                 int tsflags;
6223
6224                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6225                                 mss |= (tsflags << 11);
6226                         }
6227                 } else {
6228                         if (tcp_opt_len || iph->ihl > 5) {
6229                                 int tsflags;
6230
6231                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6232                                 base_flags |= tsflags << 12;
6233                         }
6234                 }
6235         }
6236
6237 #ifdef BCM_KERNEL_SUPPORTS_8021Q
6238         if (vlan_tx_tag_present(skb)) {
6239                 base_flags |= TXD_FLAG_VLAN;
6240                 vlan = vlan_tx_tag_get(skb);
6241         }
6242 #endif
6243
6244         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6245             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6246                 base_flags |= TXD_FLAG_JMB_PKT;
6247
6248         len = skb_headlen(skb);
6249
6250         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6251         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6252                 dev_kfree_skb(skb);
6253                 goto out_unlock;
6254         }
6255
6256         tnapi->tx_buffers[entry].skb = skb;
6257         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6258
6259         would_hit_hwbug = 0;
6260
6261         if (tg3_flag(tp, 5701_DMA_BUG))
6262                 would_hit_hwbug = 1;
6263
6264         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6265                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6266                             mss, vlan))
6267                 would_hit_hwbug = 1;
6268
6269         /* Now loop through additional data fragments, and queue them. */
6270         if (skb_shinfo(skb)->nr_frags > 0) {
6271                 u32 tmp_mss = mss;
6272
6273                 if (!tg3_flag(tp, HW_TSO_1) &&
6274                     !tg3_flag(tp, HW_TSO_2) &&
6275                     !tg3_flag(tp, HW_TSO_3))
6276                         tmp_mss = 0;
6277
6278                 last = skb_shinfo(skb)->nr_frags - 1;
6279                 for (i = 0; i <= last; i++) {
6280                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6281
6282                         len = frag->size;
6283                         mapping = pci_map_page(tp->pdev,
6284                                                frag->page,
6285                                                frag->page_offset,
6286                                                len, PCI_DMA_TODEVICE);
6287
6288                         tnapi->tx_buffers[entry].skb = NULL;
6289                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6290                                            mapping);
6291                         if (pci_dma_mapping_error(tp->pdev, mapping))
6292                                 goto dma_error;
6293
6294                         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6295                                             len, base_flags |
6296                                             ((i == last) ? TXD_FLAG_END : 0),
6297                                             tmp_mss, vlan))
6298                                 would_hit_hwbug = 1;
6299                 }
6300         }
6301
6302         if (would_hit_hwbug) {
6303                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6304
6305                 /* If the workaround fails due to memory/mapping
6306                  * failure, silently drop this packet.
6307                  */
6308                 entry = tnapi->tx_prod;
6309                 budget = tg3_tx_avail(tnapi);
6310                 if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
6311                                                 base_flags, mss, vlan))
6312                         goto out_unlock;
6313         }
6314
6315         skb_tx_timestamp(skb);
6316
6317         /* Packets are ready, update the Tx producer idx locally and on the card. */
6318         tw32_tx_mbox(tnapi->prodmbox, entry);
6319
6320         tnapi->tx_prod = entry;
6321         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6322                 netif_tx_stop_queue(txq);
6323
6324                 /* netif_tx_stop_queue() must be done before checking
6325                  * the tx index in tg3_tx_avail() below, because in
6326                  * tg3_tx(), we update tx index before checking for
6327                  * netif_tx_queue_stopped().
6328                  */
6329                 smp_mb();
6330                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6331                         netif_tx_wake_queue(txq);
6332         }
6333
6334 out_unlock:
6335         mmiowb();
6336
6337         return NETDEV_TX_OK;
6338
6339 dma_error:
6340         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6341         dev_kfree_skb(skb);
6342         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6343         return NETDEV_TX_OK;
6344 }
6345
6346 static void tg3_set_loopback(struct net_device *dev, u32 features)
6347 {
6348         struct tg3 *tp = netdev_priv(dev);
6349
6350         if (features & NETIF_F_LOOPBACK) {
6351                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6352                         return;
6353
6354                 /*
6355                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6356                  * loopback mode if Half-Duplex mode was negotiated earlier.
6357                  */
6358                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6359
6360                 /* Enable internal MAC loopback mode */
6361                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6362                 spin_lock_bh(&tp->lock);
6363                 tw32(MAC_MODE, tp->mac_mode);
6364                 netif_carrier_on(tp->dev);
6365                 spin_unlock_bh(&tp->lock);
6366                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6367         } else {
6368                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6369                         return;
6370
6371                 /* Disable internal MAC loopback mode */
6372                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6373                 spin_lock_bh(&tp->lock);
6374                 tw32(MAC_MODE, tp->mac_mode);
6375                 /* Force link status check */
6376                 tg3_setup_phy(tp, 1);
6377                 spin_unlock_bh(&tp->lock);
6378                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6379         }
6380 }
6381
6382 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6383 {
6384         struct tg3 *tp = netdev_priv(dev);
6385
6386         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6387                 features &= ~NETIF_F_ALL_TSO;
6388
6389         return features;
6390 }
6391
6392 static int tg3_set_features(struct net_device *dev, u32 features)
6393 {
6394         u32 changed = dev->features ^ features;
6395
6396         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6397                 tg3_set_loopback(dev, features);
6398
6399         return 0;
6400 }
6401
6402 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6403                                int new_mtu)
6404 {
6405         dev->mtu = new_mtu;
6406
6407         if (new_mtu > ETH_DATA_LEN) {
6408                 if (tg3_flag(tp, 5780_CLASS)) {
6409                         netdev_update_features(dev);
6410                         tg3_flag_clear(tp, TSO_CAPABLE);
6411                 } else {
6412                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6413                 }
6414         } else {
6415                 if (tg3_flag(tp, 5780_CLASS)) {
6416                         tg3_flag_set(tp, TSO_CAPABLE);
6417                         netdev_update_features(dev);
6418                 }
6419                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6420         }
6421 }
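
/* Example of the policy above (illustrative): raising the MTU to 9000
 * on a 5780-class chip re-evaluates the feature set, which drops TSO
 * via tg3_fix_features(), instead of enabling the jumbo ring; on other
 * jumbo-capable chips the same MTU simply sets JUMBO_RING_ENABLE.
 */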
6422
6423 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6424 {
6425         struct tg3 *tp = netdev_priv(dev);
6426         int err;
6427
6428         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6429                 return -EINVAL;
6430
6431         if (!netif_running(dev)) {
6432                 /* We'll just catch it later when the
6433                  * device is brought up.
6434                  */
6435                 tg3_set_mtu(dev, tp, new_mtu);
6436                 return 0;
6437         }
6438
6439         tg3_phy_stop(tp);
6440
6441         tg3_netif_stop(tp);
6442
6443         tg3_full_lock(tp, 1);
6444
6445         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6446
6447         tg3_set_mtu(dev, tp, new_mtu);
6448
6449         err = tg3_restart_hw(tp, 0);
6450
6451         if (!err)
6452                 tg3_netif_start(tp);
6453
6454         tg3_full_unlock(tp);
6455
6456         if (!err)
6457                 tg3_phy_start(tp);
6458
6459         return err;
6460 }
6461
6462 static void tg3_rx_prodring_free(struct tg3 *tp,
6463                                  struct tg3_rx_prodring_set *tpr)
6464 {
6465         int i;
6466
6467         if (tpr != &tp->napi[0].prodring) {
6468                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6469                      i = (i + 1) & tp->rx_std_ring_mask)
6470                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6471                                         tp->rx_pkt_map_sz);
6472
6473                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6474                         for (i = tpr->rx_jmb_cons_idx;
6475                              i != tpr->rx_jmb_prod_idx;
6476                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6477                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6478                                                 TG3_RX_JMB_MAP_SZ);
6479                         }
6480                 }
6481
6482                 return;
6483         }
6484
6485         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6486                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6487                                 tp->rx_pkt_map_sz);
6488
6489         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6490                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6491                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6492                                         TG3_RX_JMB_MAP_SZ);
6493         }
6494 }
6495
6496 /* Initialize rx rings for packet processing.
6497  *
6498  * The chip has been shut down and the driver detached from
6499  * the networking stack, so no interrupts or new tx packets will
6500  * end up in the driver.  tp->{tx,}lock are held and thus
6501  * we may not sleep.
6502  */
6503 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6504                                  struct tg3_rx_prodring_set *tpr)
6505 {
6506         u32 i, rx_pkt_dma_sz;
6507
6508         tpr->rx_std_cons_idx = 0;
6509         tpr->rx_std_prod_idx = 0;
6510         tpr->rx_jmb_cons_idx = 0;
6511         tpr->rx_jmb_prod_idx = 0;
6512
6513         if (tpr != &tp->napi[0].prodring) {
6514                 memset(&tpr->rx_std_buffers[0], 0,
6515                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6516                 if (tpr->rx_jmb_buffers)
6517                         memset(&tpr->rx_jmb_buffers[0], 0,
6518                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6519                 goto done;
6520         }
6521
6522         /* Zero out all descriptors. */
6523         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6524
6525         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6526         if (tg3_flag(tp, 5780_CLASS) &&
6527             tp->dev->mtu > ETH_DATA_LEN)
6528                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6529         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6530
6531         /* Initialize the invariants of the rings; we only set this
6532          * stuff once.  This works because the card does not
6533          * write into the rx buffer posting rings.
6534          */
6535         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6536                 struct tg3_rx_buffer_desc *rxd;
6537
6538                 rxd = &tpr->rx_std[i];
6539                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6540                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6541                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6542                                (i << RXD_OPAQUE_INDEX_SHIFT));
6543         }
6544
6545         /* Now allocate fresh SKBs for each rx ring. */
6546         for (i = 0; i < tp->rx_pending; i++) {
6547                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6548                         netdev_warn(tp->dev,
6549                                     "Using a smaller RX standard ring. Only "
6550                                     "%d out of %d buffers were allocated "
6551                                     "successfully\n", i, tp->rx_pending);
6552                         if (i == 0)
6553                                 goto initfail;
6554                         tp->rx_pending = i;
6555                         break;
6556                 }
6557         }
6558
6559         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6560                 goto done;
6561
6562         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6563
6564         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6565                 goto done;
6566
6567         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6568                 struct tg3_rx_buffer_desc *rxd;
6569
6570                 rxd = &tpr->rx_jmb[i].std;
6571                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6572                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6573                                   RXD_FLAG_JUMBO;
6574                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6575                        (i << RXD_OPAQUE_INDEX_SHIFT));
6576         }
6577
6578         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6579                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6580                         netdev_warn(tp->dev,
6581                                     "Using a smaller RX jumbo ring. Only %d "
6582                                     "out of %d buffers were allocated "
6583                                     "successfully\n", i, tp->rx_jumbo_pending);
6584                         if (i == 0)
6585                                 goto initfail;
6586                         tp->rx_jumbo_pending = i;
6587                         break;
6588                 }
6589         }
6590
6591 done:
6592         return 0;
6593
6594 initfail:
6595         tg3_rx_prodring_free(tp, tpr);
6596         return -ENOMEM;
6597 }
6598
6599 static void tg3_rx_prodring_fini(struct tg3 *tp,
6600                                  struct tg3_rx_prodring_set *tpr)
6601 {
6602         kfree(tpr->rx_std_buffers);
6603         tpr->rx_std_buffers = NULL;
6604         kfree(tpr->rx_jmb_buffers);
6605         tpr->rx_jmb_buffers = NULL;
6606         if (tpr->rx_std) {
6607                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6608                                   tpr->rx_std, tpr->rx_std_mapping);
6609                 tpr->rx_std = NULL;
6610         }
6611         if (tpr->rx_jmb) {
6612                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6613                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6614                 tpr->rx_jmb = NULL;
6615         }
6616 }
6617
6618 static int tg3_rx_prodring_init(struct tg3 *tp,
6619                                 struct tg3_rx_prodring_set *tpr)
6620 {
6621         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6622                                       GFP_KERNEL);
6623         if (!tpr->rx_std_buffers)
6624                 return -ENOMEM;
6625
6626         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6627                                          TG3_RX_STD_RING_BYTES(tp),
6628                                          &tpr->rx_std_mapping,
6629                                          GFP_KERNEL);
6630         if (!tpr->rx_std)
6631                 goto err_out;
6632
6633         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6634                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6635                                               GFP_KERNEL);
6636                 if (!tpr->rx_jmb_buffers)
6637                         goto err_out;
6638
6639                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6640                                                  TG3_RX_JMB_RING_BYTES(tp),
6641                                                  &tpr->rx_jmb_mapping,
6642                                                  GFP_KERNEL);
6643                 if (!tpr->rx_jmb)
6644                         goto err_out;
6645         }
6646
6647         return 0;
6648
6649 err_out:
6650         tg3_rx_prodring_fini(tp, tpr);
6651         return -ENOMEM;
6652 }
6653
6654 /* Free up pending packets in all rx/tx rings.
6655  *
6656  * The chip has been shut down and the driver detached from
6657  * the networking stack, so no interrupts or new tx packets will
6658  * end up in the driver.  tp->{tx,}lock is not held and we are not
6659  * in an interrupt context and thus may sleep.
6660  */
6661 static void tg3_free_rings(struct tg3 *tp)
6662 {
6663         int i, j;
6664
6665         for (j = 0; j < tp->irq_cnt; j++) {
6666                 struct tg3_napi *tnapi = &tp->napi[j];
6667
6668                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6669
6670                 if (!tnapi->tx_buffers)
6671                         continue;
6672
6673                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
6674                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
6675
6676                         if (!skb)
6677                                 continue;
6678
6679                         tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
6680
6681                         dev_kfree_skb_any(skb);
6682                 }
6683         }
6684 }
6685
6686 /* Initialize tx/rx rings for packet processing.
6687  *
6688  * The chip has been shut down and the driver detached from
6689  * the networking stack, so no interrupts or new tx packets will
6690  * end up in the driver.  tp->{tx,}lock are held and thus
6691  * we may not sleep.
6692  */
6693 static int tg3_init_rings(struct tg3 *tp)
6694 {
6695         int i;
6696
6697         /* Free up all the SKBs. */
6698         tg3_free_rings(tp);
6699
6700         for (i = 0; i < tp->irq_cnt; i++) {
6701                 struct tg3_napi *tnapi = &tp->napi[i];
6702
6703                 tnapi->last_tag = 0;
6704                 tnapi->last_irq_tag = 0;
6705                 tnapi->hw_status->status = 0;
6706                 tnapi->hw_status->status_tag = 0;
6707                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6708
6709                 tnapi->tx_prod = 0;
6710                 tnapi->tx_cons = 0;
6711                 if (tnapi->tx_ring)
6712                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6713
6714                 tnapi->rx_rcb_ptr = 0;
6715                 if (tnapi->rx_rcb)
6716                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6717
6718                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6719                         tg3_free_rings(tp);
6720                         return -ENOMEM;
6721                 }
6722         }
6723
6724         return 0;
6725 }
6726
6727 /*
6728  * Must not be invoked with interrupt sources disabled and
6729  * the hardware shut down.
6730  */
6731 static void tg3_free_consistent(struct tg3 *tp)
6732 {
6733         int i;
6734
6735         for (i = 0; i < tp->irq_cnt; i++) {
6736                 struct tg3_napi *tnapi = &tp->napi[i];
6737
6738                 if (tnapi->tx_ring) {
6739                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6740                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6741                         tnapi->tx_ring = NULL;
6742                 }
6743
6744                 kfree(tnapi->tx_buffers);
6745                 tnapi->tx_buffers = NULL;
6746
6747                 if (tnapi->rx_rcb) {
6748                         dma_free_coherent(&tp->pdev->dev,
6749                                           TG3_RX_RCB_RING_BYTES(tp),
6750                                           tnapi->rx_rcb,
6751                                           tnapi->rx_rcb_mapping);
6752                         tnapi->rx_rcb = NULL;
6753                 }
6754
6755                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6756
6757                 if (tnapi->hw_status) {
6758                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6759                                           tnapi->hw_status,
6760                                           tnapi->status_mapping);
6761                         tnapi->hw_status = NULL;
6762                 }
6763         }
6764
6765         if (tp->hw_stats) {
6766                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6767                                   tp->hw_stats, tp->stats_mapping);
6768                 tp->hw_stats = NULL;
6769         }
6770 }
6771
6772 /*
6773  * Must not be invoked with interrupt sources disabled and
6774  * the hardware shut down.  Can sleep.
6775  */
6776 static int tg3_alloc_consistent(struct tg3 *tp)
6777 {
6778         int i;
6779
6780         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6781                                           sizeof(struct tg3_hw_stats),
6782                                           &tp->stats_mapping,
6783                                           GFP_KERNEL);
6784         if (!tp->hw_stats)
6785                 goto err_out;
6786
6787         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6788
6789         for (i = 0; i < tp->irq_cnt; i++) {
6790                 struct tg3_napi *tnapi = &tp->napi[i];
6791                 struct tg3_hw_status *sblk;
6792
6793                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6794                                                       TG3_HW_STATUS_SIZE,
6795                                                       &tnapi->status_mapping,
6796                                                       GFP_KERNEL);
6797                 if (!tnapi->hw_status)
6798                         goto err_out;
6799
6800                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6801                 sblk = tnapi->hw_status;
6802
6803                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6804                         goto err_out;
6805
6806                 /* If multivector TSS is enabled, vector 0 does not handle
6807                  * tx interrupts.  Don't allocate any resources for it.
6808                  */
6809                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6810                     (i && tg3_flag(tp, ENABLE_TSS))) {
6811                         tnapi->tx_buffers = kzalloc(
6812                                                sizeof(struct tg3_tx_ring_info) *
6813                                                TG3_TX_RING_SIZE, GFP_KERNEL);
6814                         if (!tnapi->tx_buffers)
6815                                 goto err_out;
6816
6817                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6818                                                             TG3_TX_RING_BYTES,
6819                                                         &tnapi->tx_desc_mapping,
6820                                                             GFP_KERNEL);
6821                         if (!tnapi->tx_ring)
6822                                 goto err_out;
6823                 }
6824
6825                 /*
6826                  * When RSS is enabled, the status block format changes
6827                  * slightly.  The "rx_jumbo_consumer", "reserved",
6828                  * and "rx_mini_consumer" members get mapped to the
6829                  * other three rx return ring producer indexes.
6830                  */
6831                 switch (i) {
6832                 default:
6833                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6834                         break;
6835                 case 2:
6836                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6837                         break;
6838                 case 3:
6839                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6840                         break;
6841                 case 4:
6842                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6843                         break;
6844                 }
6845
6846                 /*
6847                  * If multivector RSS is enabled, vector 0 does not handle
6848                  * rx or tx interrupts.  Don't allocate any resources for it.
6849                  */
6850                 if (!i && tg3_flag(tp, ENABLE_RSS))
6851                         continue;
6852
6853                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6854                                                    TG3_RX_RCB_RING_BYTES(tp),
6855                                                    &tnapi->rx_rcb_mapping,
6856                                                    GFP_KERNEL);
6857                 if (!tnapi->rx_rcb)
6858                         goto err_out;
6859
6860                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6861         }
6862
6863         return 0;
6864
6865 err_out:
6866         tg3_free_consistent(tp);
6867         return -ENOMEM;
6868 }
6869
6870 #define MAX_WAIT_CNT 1000
6871
6872 /* To stop a block, clear the enable bit and poll until it
6873  * clears.  tp->lock is held.
6874  */
6875 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6876 {
6877         unsigned int i;
6878         u32 val;
6879
6880         if (tg3_flag(tp, 5705_PLUS)) {
6881                 switch (ofs) {
6882                 case RCVLSC_MODE:
6883                 case DMAC_MODE:
6884                 case MBFREE_MODE:
6885                 case BUFMGR_MODE:
6886                 case MEMARB_MODE:
6887                         /* We can't enable/disable these bits of the
6888                          * 5705/5750; just say success.
6889                          */
6890                         return 0;
6891
6892                 default:
6893                         break;
6894                 }
6895         }
6896
6897         val = tr32(ofs);
6898         val &= ~enable_bit;
6899         tw32_f(ofs, val);
6900
6901         for (i = 0; i < MAX_WAIT_CNT; i++) {
6902                 udelay(100);
6903                 val = tr32(ofs);
6904                 if ((val & enable_bit) == 0)
6905                         break;
6906         }
6907
6908         if (i == MAX_WAIT_CNT && !silent) {
6909                 dev_err(&tp->pdev->dev,
6910                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6911                         ofs, enable_bit);
6912                 return -ENODEV;
6913         }
6914
6915         return 0;
6916 }
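
/* Timing note: the poll loop above spins for at most
 * MAX_WAIT_CNT * 100us = 100ms per block before reporting -ENODEV, so a
 * wedged chip can make tg3_abort_hw() below take on the order of a
 * second or two across all the blocks it stops.
 */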
6917
6918 /* tp->lock is held. */
6919 static int tg3_abort_hw(struct tg3 *tp, int silent)
6920 {
6921         int i, err;
6922
6923         tg3_disable_ints(tp);
6924
6925         tp->rx_mode &= ~RX_MODE_ENABLE;
6926         tw32_f(MAC_RX_MODE, tp->rx_mode);
6927         udelay(10);
6928
6929         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6930         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6931         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6932         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6933         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6934         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6935
6936         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6937         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6938         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6939         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6940         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6941         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6942         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6943
6944         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6945         tw32_f(MAC_MODE, tp->mac_mode);
6946         udelay(40);
6947
6948         tp->tx_mode &= ~TX_MODE_ENABLE;
6949         tw32_f(MAC_TX_MODE, tp->tx_mode);
6950
6951         for (i = 0; i < MAX_WAIT_CNT; i++) {
6952                 udelay(100);
6953                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6954                         break;
6955         }
6956         if (i >= MAX_WAIT_CNT) {
6957                 dev_err(&tp->pdev->dev,
6958                         "%s timed out, TX_MODE_ENABLE will not clear "
6959                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6960                 err |= -ENODEV;
6961         }
6962
6963         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6964         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6965         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6966
6967         tw32(FTQ_RESET, 0xffffffff);
6968         tw32(FTQ_RESET, 0x00000000);
6969
6970         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6971         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6972
6973         for (i = 0; i < tp->irq_cnt; i++) {
6974                 struct tg3_napi *tnapi = &tp->napi[i];
6975                 if (tnapi->hw_status)
6976                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6977         }
6978         if (tp->hw_stats)
6979                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6980
6981         return err;
6982 }
6983
6984 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6985 {
6986         int i;
6987         u32 apedata;
6988
6989         /* NCSI does not support APE events */
6990         if (tg3_flag(tp, APE_HAS_NCSI))
6991                 return;
6992
6993         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6994         if (apedata != APE_SEG_SIG_MAGIC)
6995                 return;
6996
6997         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6998         if (!(apedata & APE_FW_STATUS_READY))
6999                 return;
7000
7001         /* Wait for up to 1 millisecond for APE to service previous event. */
7002         for (i = 0; i < 10; i++) {
7003                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
7004                         return;
7005
7006                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
7007
7008                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7009                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
7010                                         event | APE_EVENT_STATUS_EVENT_PENDING);
7011
7012                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
7013
7014                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7015                         break;
7016
7017                 udelay(100);
7018         }
7019
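             /* Fire the event off to the APE, but only if we actually
              * managed to post it above.
              */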
7020         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7021                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
7022 }
7023
7024 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
7025 {
7026         u32 event;
7027         u32 apedata;
7028
7029         if (!tg3_flag(tp, ENABLE_APE))
7030                 return;
7031
7032         switch (kind) {
7033         case RESET_KIND_INIT:
7034                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
7035                                 APE_HOST_SEG_SIG_MAGIC);
7036                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
7037                                 APE_HOST_SEG_LEN_MAGIC);
7038                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
7039                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
7040                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
7041                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
7042                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
7043                                 APE_HOST_BEHAV_NO_PHYLOCK);
7044                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
7045                                     TG3_APE_HOST_DRVR_STATE_START);
7046
7047                 event = APE_EVENT_STATUS_STATE_START;
7048                 break;
7049         case RESET_KIND_SHUTDOWN:
7050                 /* With the interface we are currently using,
7051                  * APE does not track driver state.  Wiping
7052                  * out the HOST SEGMENT SIGNATURE forces
7053                  * the APE to assume OS absent status.
7054                  */
7055                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
7056
7057                 if (device_may_wakeup(&tp->pdev->dev) &&
7058                     tg3_flag(tp, WOL_ENABLE)) {
7059                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7060                                             TG3_APE_HOST_WOL_SPEED_AUTO);
7061                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7062                 } else
7063                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7064
7065                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7066
7067                 event = APE_EVENT_STATUS_STATE_UNLOAD;
7068                 break;
7069         case RESET_KIND_SUSPEND:
7070                 event = APE_EVENT_STATUS_STATE_SUSPEND;
7071                 break;
7072         default:
7073                 return;
7074         }
7075
7076         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7077
7078         tg3_ape_send_event(tp, event);
7079 }
7080
7081 /* tp->lock is held. */
7082 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7083 {
7084         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7085                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7086
7087         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7088                 switch (kind) {
7089                 case RESET_KIND_INIT:
7090                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7091                                       DRV_STATE_START);
7092                         break;
7093
7094                 case RESET_KIND_SHUTDOWN:
7095                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7096                                       DRV_STATE_UNLOAD);
7097                         break;
7098
7099                 case RESET_KIND_SUSPEND:
7100                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7101                                       DRV_STATE_SUSPEND);
7102                         break;
7103
7104                 default:
7105                         break;
7106                 }
7107         }
7108
7109         if (kind == RESET_KIND_INIT ||
7110             kind == RESET_KIND_SUSPEND)
7111                 tg3_ape_driver_state_change(tp, kind);
7112 }
7113
7114 /* tp->lock is held. */
7115 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7116 {
7117         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7118                 switch (kind) {
7119                 case RESET_KIND_INIT:
7120                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7121                                       DRV_STATE_START_DONE);
7122                         break;
7123
7124                 case RESET_KIND_SHUTDOWN:
7125                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7126                                       DRV_STATE_UNLOAD_DONE);
7127                         break;
7128
7129                 default:
7130                         break;
7131                 }
7132         }
7133
7134         if (kind == RESET_KIND_SHUTDOWN)
7135                 tg3_ape_driver_state_change(tp, kind);
7136 }
7137
7138 /* tp->lock is held. */
7139 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7140 {
7141         if (tg3_flag(tp, ENABLE_ASF)) {
7142                 switch (kind) {
7143                 case RESET_KIND_INIT:
7144                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7145                                       DRV_STATE_START);
7146                         break;
7147
7148                 case RESET_KIND_SHUTDOWN:
7149                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7150                                       DRV_STATE_UNLOAD);
7151                         break;
7152
7153                 case RESET_KIND_SUSPEND:
7154                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7155                                       DRV_STATE_SUSPEND);
7156                         break;
7157
7158                 default:
7159                         break;
7160                 }
7161         }
7162 }
7163
7164 static int tg3_poll_fw(struct tg3 *tp)
7165 {
7166         int i;
7167         u32 val;
7168
7169         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7170                 /* Wait up to 20ms for init done. */
7171                 for (i = 0; i < 200; i++) {
7172                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7173                                 return 0;
7174                         udelay(100);
7175                 }
7176                 return -ENODEV;
7177         }
7178
7179         /* Wait for firmware initialization to complete. */
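             /* Bootcode signals completion by writing back the one's
              * complement of the magic value; poll for up to a second
              * (100000 iterations of 10us).
              */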
7180         for (i = 0; i < 100000; i++) {
7181                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7182                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7183                         break;
7184                 udelay(10);
7185         }
7186
7187         /* Chip might not be fitted with firmware.  Some Sun onboard
7188          * parts are configured like that.  So don't signal the timeout
7189          * of the above loop as an error, but do report the lack of
7190          * running firmware once.
7191          */
7192         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7193                 tg3_flag_set(tp, NO_FWARE_REPORTED);
7194
7195                 netdev_info(tp->dev, "No firmware running\n");
7196         }
7197
7198         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7199                 /* The 57765 A0 needs a little more
7200                  * time to do some important work.
7201                  */
7202                 mdelay(10);
7203         }
7204
7205         return 0;
7206 }
7207
7208 /* Save PCI command register before chip reset */
7209 static void tg3_save_pci_state(struct tg3 *tp)
7210 {
7211         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7212 }
7213
7214 /* Restore PCI state after chip reset */
7215 static void tg3_restore_pci_state(struct tg3 *tp)
7216 {
7217         u32 val;
7218
7219         /* Re-enable indirect register accesses. */
7220         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7221                                tp->misc_host_ctrl);
7222
7223         /* Set MAX PCI retry to zero. */
7224         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7225         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7226             tg3_flag(tp, PCIX_MODE))
7227                 val |= PCISTATE_RETRY_SAME_DMA;
7228         /* Allow reads and writes to the APE register and memory space. */
7229         if (tg3_flag(tp, ENABLE_APE))
7230                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7231                        PCISTATE_ALLOW_APE_SHMEM_WR |
7232                        PCISTATE_ALLOW_APE_PSPACE_WR;
7233         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7234
7235         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7236
7237         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7238                 if (tg3_flag(tp, PCI_EXPRESS))
7239                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7240                 else {
7241                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7242                                               tp->pci_cacheline_sz);
7243                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7244                                               tp->pci_lat_timer);
7245                 }
7246         }
7247
7248         /* Make sure PCI-X relaxed ordering bit is clear. */
7249         if (tg3_flag(tp, PCIX_MODE)) {
7250                 u16 pcix_cmd;
7251
7252                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7253                                      &pcix_cmd);
7254                 pcix_cmd &= ~PCI_X_CMD_ERO;
7255                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7256                                       pcix_cmd);
7257         }
7258
7259         if (tg3_flag(tp, 5780_CLASS)) {
7260
7261                 /* Chip reset on 5780 will reset MSI enable bit,
7262                  * so we need to restore it.
7263                  */
7264                 if (tg3_flag(tp, USING_MSI)) {
7265                         u16 ctrl;
7266
7267                         pci_read_config_word(tp->pdev,
7268                                              tp->msi_cap + PCI_MSI_FLAGS,
7269                                              &ctrl);
7270                         pci_write_config_word(tp->pdev,
7271                                               tp->msi_cap + PCI_MSI_FLAGS,
7272                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7273                         val = tr32(MSGINT_MODE);
7274                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7275                 }
7276         }
7277 }
7278
7279 static void tg3_stop_fw(struct tg3 *);
7280
7281 /* tp->lock is held. */
7282 static int tg3_chip_reset(struct tg3 *tp)
7283 {
7284         u32 val;
7285         void (*write_op)(struct tg3 *, u32, u32);
7286         int i, err;
7287
7288         tg3_nvram_lock(tp);
7289
7290         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7291
7292         /* No matching tg3_nvram_unlock() after this because
7293          * chip reset below will undo the nvram lock.
7294          */
7295         tp->nvram_lock_cnt = 0;
7296
7297         /* GRC_MISC_CFG core clock reset will clear the memory
7298          * enable bit in PCI register 4 and the MSI enable bit
7299          * on some chips, so we save relevant registers here.
7300          */
7301         tg3_save_pci_state(tp);
7302
7303         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7304             tg3_flag(tp, 5755_PLUS))
7305                 tw32(GRC_FASTBOOT_PC, 0);
7306
7307         /*
7308          * We must avoid the readl() that normally takes place.
7309          * It locks machines, causes machine checks, and other
7310          * fun things.  So, temporarily disable the 5701
7311          * hardware workaround, while we do the reset.
7312          */
7313         write_op = tp->write32;
7314         if (write_op == tg3_write_flush_reg32)
7315                 tp->write32 = tg3_write32;
7316
7317         /* Prevent the irq handler from reading or writing PCI registers
7318          * during chip reset when the memory enable bit in the PCI command
7319          * register may be cleared.  The chip does not generate interrupts
7320          * at this time, but the irq handler may still be called due to irq
7321          * sharing or irqpoll.
7322          */
7323         tg3_flag_set(tp, CHIP_RESETTING);
7324         for (i = 0; i < tp->irq_cnt; i++) {
7325                 struct tg3_napi *tnapi = &tp->napi[i];
7326                 if (tnapi->hw_status) {
7327                         tnapi->hw_status->status = 0;
7328                         tnapi->hw_status->status_tag = 0;
7329                 }
7330                 tnapi->last_tag = 0;
7331                 tnapi->last_irq_tag = 0;
7332         }
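             /* Make the CHIP_RESETTING flag and the cleared status
              * blocks visible to the IRQ handlers before waiting for
              * any in-flight handlers below.
              */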
7333         smp_mb();
7334
7335         for (i = 0; i < tp->irq_cnt; i++)
7336                 synchronize_irq(tp->napi[i].irq_vec);
7337
7338         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7339                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7340                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7341         }
7342
7343         /* do the reset */
7344         val = GRC_MISC_CFG_CORECLK_RESET;
7345
7346         if (tg3_flag(tp, PCI_EXPRESS)) {
7347                 /* Force PCIe 1.0a mode */
7348                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7349                     !tg3_flag(tp, 57765_PLUS) &&
7350                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7351                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7352                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7353
7354                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7355                         tw32(GRC_MISC_CFG, (1 << 29));
7356                         val |= (1 << 29);
7357                 }
7358         }
7359
7360         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7361                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7362                 tw32(GRC_VCPU_EXT_CTRL,
7363                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7364         }
7365
7366         /* Manage gphy power for all CPMU-absent PCIe devices. */
7367         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7368                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7369
7370         tw32(GRC_MISC_CFG, val);
7371
7372         /* restore 5701 hardware bug workaround write method */
7373         tp->write32 = write_op;
7374
7375         /* Unfortunately, we have to delay before the PCI read back.
7376          * Some 575X chips will not even respond to a PCI cfg access
7377          * when the reset command is given to the chip.
7378          *
7379          * How do these hardware designers expect things to work
7380          * properly if the PCI write is posted for a long period
7381          * of time?  It is always necessary to have some method by
7382          * which a register read back can occur to push the write
7383          * out which does the reset.
7384          *
7385          * For most tg3 variants the trick below has worked.
7386          * Ho hum...
7387          */
7388         udelay(120);
7389
7390         /* Flush PCI posted writes.  The normal MMIO registers
7391          * are inaccessible at this time so this is the only
7392          * way to do this reliably (actually, this is no longer
7393          * the case, see above).  I tried to use indirect
7394          * register read/write but this upset some 5701 variants.
7395          */
7396         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7397
7398         udelay(120);
7399
7400         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7401                 u16 val16;
7402
7403                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7404                         int i;
7405                         u32 cfg_val;
7406
7407                         /* Wait for link training to complete.  */
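                             /* No status to poll here; this is a fixed
                              * delay of 5000 * 100us = 500ms.
                              */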
7408                         for (i = 0; i < 5000; i++)
7409                                 udelay(100);
7410
7411                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7412                         pci_write_config_dword(tp->pdev, 0xc4,
7413                                                cfg_val | (1 << 15));
7414                 }
7415
7416                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7417                 pci_read_config_word(tp->pdev,
7418                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7419                                      &val16);
7420                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7421                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7422                 /*
7423                  * Older PCIe devices only support the 128 byte
7424                  * MPS setting.  Enforce the restriction.
7425                  */
7426                 if (!tg3_flag(tp, CPMU_PRESENT))
7427                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7428                 pci_write_config_word(tp->pdev,
7429                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7430                                       val16);
7431
7432                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7433
7434                 /* Clear error status */
7435                 pci_write_config_word(tp->pdev,
7436                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7437                                       PCI_EXP_DEVSTA_CED |
7438                                       PCI_EXP_DEVSTA_NFED |
7439                                       PCI_EXP_DEVSTA_FED |
7440                                       PCI_EXP_DEVSTA_URD);
7441         }
7442
7443         tg3_restore_pci_state(tp);
7444
7445         tg3_flag_clear(tp, CHIP_RESETTING);
7446         tg3_flag_clear(tp, ERROR_PROCESSED);
7447
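             /* Re-enable the memory arbiter before touching NIC SRAM
              * again; 5780-class parts keep their existing mode bits.
              */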
7448         val = 0;
7449         if (tg3_flag(tp, 5780_CLASS))
7450                 val = tr32(MEMARB_MODE);
7451         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7452
7453         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7454                 tg3_stop_fw(tp);
7455                 tw32(0x5000, 0x400);
7456         }
7457
7458         tw32(GRC_MODE, tp->grc_mode);
7459
7460         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7461                 val = tr32(0xc4);
7462
7463                 tw32(0xc4, val | (1 << 15));
7464         }
7465
7466         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7467             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7468                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7469                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7470                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7471                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7472         }
7473
7474         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7475                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7476                 val = tp->mac_mode;
7477         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7478                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7479                 val = tp->mac_mode;
7480         } else
7481                 val = 0;
7482
7483         tw32_f(MAC_MODE, val);
7484         udelay(40);
7485
7486         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7487
7488         err = tg3_poll_fw(tp);
7489         if (err)
7490                 return err;
7491
7492         tg3_mdio_start(tp);
7493
7494         if (tg3_flag(tp, PCI_EXPRESS) &&
7495             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7496             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7497             !tg3_flag(tp, 57765_PLUS)) {
7498                 val = tr32(0x7c00);
7499
7500                 tw32(0x7c00, val | (1 << 25));
7501         }
7502
7503         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7504                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7505                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7506         }
7507
7508         /* Reprobe ASF enable state.  */
7509         tg3_flag_clear(tp, ENABLE_ASF);
7510         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7511         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7512         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7513                 u32 nic_cfg;
7514
7515                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7516                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7517                         tg3_flag_set(tp, ENABLE_ASF);
7518                         tp->last_event_jiffies = jiffies;
7519                         if (tg3_flag(tp, 5750_PLUS))
7520                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7521                 }
7522         }
7523
7524         return 0;
7525 }
7526
7527 /* tp->lock is held. */
7528 static void tg3_stop_fw(struct tg3 *tp)
7529 {
7530         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7531                 /* Wait for RX cpu to ACK the previous event. */
7532                 tg3_wait_for_event_ack(tp);
7533
7534                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7535
7536                 tg3_generate_fw_event(tp);
7537
7538                 /* Wait for RX cpu to ACK this event. */
7539                 tg3_wait_for_event_ack(tp);
7540         }
7541 }
7542
7543 /* tp->lock is held. */
7544 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7545 {
7546         int err;
7547
7548         tg3_stop_fw(tp);
7549
7550         tg3_write_sig_pre_reset(tp, kind);
7551
7552         tg3_abort_hw(tp, silent);
7553         err = tg3_chip_reset(tp);
7554
7555         __tg3_set_mac_addr(tp, 0);
7556
7557         tg3_write_sig_legacy(tp, kind);
7558         tg3_write_sig_post_reset(tp, kind);
7559
7560         if (err)
7561                 return err;
7562
7563         return 0;
7564 }
7565
7566 #define RX_CPU_SCRATCH_BASE     0x30000
7567 #define RX_CPU_SCRATCH_SIZE     0x04000
7568 #define TX_CPU_SCRATCH_BASE     0x34000
7569 #define TX_CPU_SCRATCH_SIZE     0x04000
7570
7571 /* tp->lock is held. */
7572 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7573 {
7574         int i;
7575
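             /* 5705-class and newer chips have no separate TX CPU. */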
7576         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7577
7578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7579                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7580
7581                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7582                 return 0;
7583         }
7584         if (offset == RX_CPU_BASE) {
7585                 for (i = 0; i < 10000; i++) {
7586                         tw32(offset + CPU_STATE, 0xffffffff);
7587                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7588                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7589                                 break;
7590                 }
7591
7592                 tw32(offset + CPU_STATE, 0xffffffff);
7593                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7594                 udelay(10);
7595         } else {
7596                 for (i = 0; i < 10000; i++) {
7597                         tw32(offset + CPU_STATE, 0xffffffff);
7598                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7599                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7600                                 break;
7601                 }
7602         }
7603
7604         if (i >= 10000) {
7605                 netdev_err(tp->dev, "%s timed out halting the %s CPU\n",
7606                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7607                 return -ENODEV;
7608         }
7609
7610         /* Clear firmware's nvram arbitration. */
7611         if (tg3_flag(tp, NVRAM))
7612                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7613         return 0;
7614 }
7615
7616 struct fw_info {
7617         unsigned int fw_base;
7618         unsigned int fw_len;
7619         const __be32 *fw_data;
7620 };
7621
7622 /* tp->lock is held. */
7623 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7624                                  int cpu_scratch_size, struct fw_info *info)
7625 {
7626         int err, lock_err, i;
7627         void (*write_op)(struct tg3 *, u32, u32);
7628
7629         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7630                 netdev_err(tp->dev,
7631                            "%s: Trying to load TX cpu firmware, but 5705-plus chips have no TX cpu\n",
7632                            __func__);
7633                 return -EINVAL;
7634         }
7635
7636         if (tg3_flag(tp, 5705_PLUS))
7637                 write_op = tg3_write_mem;
7638         else
7639                 write_op = tg3_write_indirect_reg32;
7640
7641         /* It is possible that bootcode is still loading at this point.
7642          * Get the nvram lock before halting the cpu.
7643          */
7644         lock_err = tg3_nvram_lock(tp);
7645         err = tg3_halt_cpu(tp, cpu_base);
7646         if (!lock_err)
7647                 tg3_nvram_unlock(tp);
7648         if (err)
7649                 goto out;
7650
7651         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7652                 write_op(tp, cpu_scratch_base + i, 0);
7653         tw32(cpu_base + CPU_STATE, 0xffffffff);
7654         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
7655         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7656                 write_op(tp, (cpu_scratch_base +
7657                               (info->fw_base & 0xffff) +
7658                               (i * sizeof(u32))),
7659                               be32_to_cpu(info->fw_data[i]));
7660
7661         err = 0;
7662
7663 out:
7664         return err;
7665 }
7666
7667 /* tp->lock is held. */
7668 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7669 {
7670         struct fw_info info;
7671         const __be32 *fw_data;
7672         int err, i;
7673
7674         fw_data = (void *)tp->fw->data;
7675
7676         /* The firmware blob starts with version numbers, followed by
7677          * the start address and length.  We set the complete length:
7678          * length = end_address_of_bss - start_address_of_text.  The
7679          * remainder is the blob, to be loaded contiguously from the
7680          * start address. */
7681
7682         info.fw_base = be32_to_cpu(fw_data[1]);
7683         info.fw_len = tp->fw->size - 12;
7684         info.fw_data = &fw_data[3];
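             /* The 12 bytes skipped are the three header words:
              * version (fw_data[0]), start address (fw_data[1]) and
              * length (fw_data[2]).
              */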
7685
7686         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7687                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7688                                     &info);
7689         if (err)
7690                 return err;
7691
7692         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7693                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7694                                     &info);
7695         if (err)
7696                 return err;
7697
7698         /* Now start up only the RX cpu. */
7699         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7700         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7701
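             /* Verify that the PC write took; retry up to five times,
              * re-halting the CPU before each attempt.
              */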
7702         for (i = 0; i < 5; i++) {
7703                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7704                         break;
7705                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7706                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7707                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7708                 udelay(1000);
7709         }
7710         if (i >= 5) {
7711                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
7712                            "should be %08x\n", __func__,
7713                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7714                 return -ENODEV;
7715         }
7716         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7717         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7718
7719         return 0;
7720 }
7721
7722 /* tp->lock is held. */
7723 static int tg3_load_tso_firmware(struct tg3 *tp)
7724 {
7725         struct fw_info info;
7726         const __be32 *fw_data;
7727         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7728         int err, i;
7729
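             /* Chips that do TSO fully in hardware need no firmware. */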
7730         if (tg3_flag(tp, HW_TSO_1) ||
7731             tg3_flag(tp, HW_TSO_2) ||
7732             tg3_flag(tp, HW_TSO_3))
7733                 return 0;
7734
7735         fw_data = (void *)tp->fw->data;
7736
7737         /* The firmware blob starts with version numbers, followed by
7738          * the start address and length.  We set the complete length:
7739          * length = end_address_of_bss - start_address_of_text.  The
7740          * remainder is the blob, to be loaded contiguously from the
7741          * start address. */
7742
7743         info.fw_base = be32_to_cpu(fw_data[1]);
7744         cpu_scratch_size = tp->fw_len;
7745         info.fw_len = tp->fw->size - 12;
7746         info.fw_data = &fw_data[3];
7747
7748         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
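                     /* The 5705 has no TX CPU; the TSO firmware runs on
                      * the RX CPU, borrowing part of the mbuf pool as
                      * scratch space.
                      */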
7749                 cpu_base = RX_CPU_BASE;
7750                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7751         } else {
7752                 cpu_base = TX_CPU_BASE;
7753                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7754                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7755         }
7756
7757         err = tg3_load_firmware_cpu(tp, cpu_base,
7758                                     cpu_scratch_base, cpu_scratch_size,
7759                                     &info);
7760         if (err)
7761                 return err;
7762
7763         /* Now start up the cpu. */
7764         tw32(cpu_base + CPU_STATE, 0xffffffff);
7765         tw32_f(cpu_base + CPU_PC, info.fw_base);
7766
7767         for (i = 0; i < 5; i++) {
7768                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7769                         break;
7770                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7771                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7772                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7773                 udelay(1000);
7774         }
7775         if (i >= 5) {
7776                 netdev_err(tp->dev,
7777                            "%s failed to set CPU PC: is %08x, should be %08x\n",
7778                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7779                 return -ENODEV;
7780         }
7781         tw32(cpu_base + CPU_STATE, 0xffffffff);
7782         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7783         return 0;
7784 }
7785
7786
7787 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7788 {
7789         struct tg3 *tp = netdev_priv(dev);
7790         struct sockaddr *addr = p;
7791         int err = 0, skip_mac_1 = 0;
7792
7793         if (!is_valid_ether_addr(addr->sa_data))
7794                 return -EINVAL;
7795
7796         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7797
7798         if (!netif_running(dev))
7799                 return 0;
7800
7801         if (tg3_flag(tp, ENABLE_ASF)) {
7802                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7803
7804                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7805                 addr0_low = tr32(MAC_ADDR_0_LOW);
7806                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7807                 addr1_low = tr32(MAC_ADDR_1_LOW);
7808
7809                 /* Skip MAC addr 1 if ASF is using it. */
7810                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7811                     !(addr1_high == 0 && addr1_low == 0))
7812                         skip_mac_1 = 1;
7813         }
7814         spin_lock_bh(&tp->lock);
7815         __tg3_set_mac_addr(tp, skip_mac_1);
7816         spin_unlock_bh(&tp->lock);
7817
7818         return err;
7819 }
7820
7821 /* tp->lock is held. */
7822 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7823                            dma_addr_t mapping, u32 maxlen_flags,
7824                            u32 nic_addr)
7825 {
7826         tg3_write_mem(tp,
7827                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7828                       ((u64) mapping >> 32));
7829         tg3_write_mem(tp,
7830                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7831                       ((u64) mapping & 0xffffffff));
7832         tg3_write_mem(tp,
7833                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7834                        maxlen_flags);
7835
7836         if (!tg3_flag(tp, 5705_PLUS))
7837                 tg3_write_mem(tp,
7838                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7839                               nic_addr);
7840 }
7841
7842 static void __tg3_set_rx_mode(struct net_device *);
7843 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7844 {
7845         int i;
7846
7847         if (!tg3_flag(tp, ENABLE_TSS)) {
7848                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7849                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7850                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7851         } else {
7852                 tw32(HOSTCC_TXCOL_TICKS, 0);
7853                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7854                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7855         }
7856
7857         if (!tg3_flag(tp, ENABLE_RSS)) {
7858                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7859                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7860                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7861         } else {
7862                 tw32(HOSTCC_RXCOL_TICKS, 0);
7863                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7864                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7865         }
7866
7867         if (!tg3_flag(tp, 5705_PLUS)) {
7868                 u32 val = ec->stats_block_coalesce_usecs;
7869
7870                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7871                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7872
7873                 if (!netif_carrier_ok(tp->dev))
7874                         val = 0;
7875
7876                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7877         }
7878
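             /* Vectors 1 and up each have their own bank of coalescing
              * registers, spaced 0x18 bytes apart starting at the VEC1
              * set.
              */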
7879         for (i = 0; i < tp->irq_cnt - 1; i++) {
7880                 u32 reg;
7881
7882                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7883                 tw32(reg, ec->rx_coalesce_usecs);
7884                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7885                 tw32(reg, ec->rx_max_coalesced_frames);
7886                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7887                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7888
7889                 if (tg3_flag(tp, ENABLE_TSS)) {
7890                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7891                         tw32(reg, ec->tx_coalesce_usecs);
7892                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7893                         tw32(reg, ec->tx_max_coalesced_frames);
7894                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7895                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7896                 }
7897         }
7898
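             /* Zero the coalescing registers of any unused vectors. */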
7899         for (; i < tp->irq_max - 1; i++) {
7900                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7901                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7902                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7903
7904                 if (tg3_flag(tp, ENABLE_TSS)) {
7905                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7906                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7907                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7908                 }
7909         }
7910 }
7911
7912 /* tp->lock is held. */
7913 static void tg3_rings_reset(struct tg3 *tp)
7914 {
7915         int i;
7916         u32 stblk, txrcb, rxrcb, limit;
7917         struct tg3_napi *tnapi = &tp->napi[0];
7918
7919         /* Disable all transmit rings but the first. */
7920         if (!tg3_flag(tp, 5705_PLUS))
7921                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7922         else if (tg3_flag(tp, 5717_PLUS))
7923                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7924         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7925                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7926         else
7927                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7928
7929         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7930              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7931                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7932                               BDINFO_FLAGS_DISABLED);
7933
7935         /* Disable all receive return rings but the first. */
7936         if (tg3_flag(tp, 5717_PLUS))
7937                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7938         else if (!tg3_flag(tp, 5705_PLUS))
7939                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7940         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7941                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7942                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7943         else
7944                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7945
7946         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7947              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7948                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7949                               BDINFO_FLAGS_DISABLED);
7950
7951         /* Disable interrupts */
7952         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7953         tp->napi[0].chk_msi_cnt = 0;
7954         tp->napi[0].last_rx_cons = 0;
7955         tp->napi[0].last_tx_cons = 0;
7956
7957         /* Zero mailbox registers. */
7958         if (tg3_flag(tp, SUPPORT_MSIX)) {
7959                 for (i = 1; i < tp->irq_max; i++) {
7960                         tp->napi[i].tx_prod = 0;
7961                         tp->napi[i].tx_cons = 0;
7962                         if (tg3_flag(tp, ENABLE_TSS))
7963                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7964                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7965                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7966                         tp->napi[i].chk_msi_cnt = 0;
7967                         tp->napi[i].last_rx_cons = 0;
7968                         tp->napi[i].last_tx_cons = 0;
7969                 }
7970                 if (!tg3_flag(tp, ENABLE_TSS))
7971                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7972         } else {
7973                 tp->napi[0].tx_prod = 0;
7974                 tp->napi[0].tx_cons = 0;
7975                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7976                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7977         }
7978
7979         /* Make sure the NIC-based send BD rings are disabled. */
7980         if (!tg3_flag(tp, 5705_PLUS)) {
7981                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7982                 for (i = 0; i < 16; i++)
7983                         tw32_tx_mbox(mbox + i * 8, 0);
7984         }
7985
7986         txrcb = NIC_SRAM_SEND_RCB;
7987         rxrcb = NIC_SRAM_RCV_RET_RCB;
7988
7989         /* Clear status block in ram. */
7990         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7991
7992         /* Set status block DMA address */
7993         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7994              ((u64) tnapi->status_mapping >> 32));
7995         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7996              ((u64) tnapi->status_mapping & 0xffffffff));
7997
7998         if (tnapi->tx_ring) {
7999                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8000                                (TG3_TX_RING_SIZE <<
8001                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8002                                NIC_SRAM_TX_BUFFER_DESC);
8003                 txrcb += TG3_BDINFO_SIZE;
8004         }
8005
8006         if (tnapi->rx_rcb) {
8007                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8008                                (tp->rx_ret_ring_mask + 1) <<
8009                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8010                 rxrcb += TG3_BDINFO_SIZE;
8011         }
8012
8013         stblk = HOSTCC_STATBLCK_RING1;
8014
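             /* Status blocks for the extra vectors occupy consecutive
              * high/low register pairs, 8 bytes apart, starting at the
              * ring 1 slot.
              */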
8015         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8016                 u64 mapping = (u64)tnapi->status_mapping;
8017                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8018                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8019
8020                 /* Clear status block in ram. */
8021                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8022
8023                 if (tnapi->tx_ring) {
8024                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8025                                        (TG3_TX_RING_SIZE <<
8026                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8027                                        NIC_SRAM_TX_BUFFER_DESC);
8028                         txrcb += TG3_BDINFO_SIZE;
8029                 }
8030
8031                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8032                                ((tp->rx_ret_ring_mask + 1) <<
8033                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8034
8035                 stblk += 8;
8036                 rxrcb += TG3_BDINFO_SIZE;
8037         }
8038 }
8039
8040 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8041 {
8042         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8043
8044         if (!tg3_flag(tp, 5750_PLUS) ||
8045             tg3_flag(tp, 5780_CLASS) ||
8046             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8047             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8048                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8049         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8050                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8051                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8052         else
8053                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8054
8055         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8056         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8057
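             /* Use the stricter of the NIC-side and host-side
              * replenish watermarks for the standard ring.
              */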
8058         val = min(nic_rep_thresh, host_rep_thresh);
8059         tw32(RCVBDI_STD_THRESH, val);
8060
8061         if (tg3_flag(tp, 57765_PLUS))
8062                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8063
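             /* Done unless there is a dedicated jumbo ring to tune;
              * 5780-class parts carry jumbo frames on the standard
              * ring.
              */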
8064         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8065                 return;
8066
8067         if (!tg3_flag(tp, 5705_PLUS))
8068                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8069         else
8070                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8071
8072         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8073
8074         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8075         tw32(RCVBDI_JUMBO_THRESH, val);
8076
8077         if (tg3_flag(tp, 57765_PLUS))
8078                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8079 }
8080
8081 /* tp->lock is held. */
8082 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8083 {
8084         u32 val, rdmac_mode;
8085         int i, err, limit;
8086         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8087
8088         tg3_disable_ints(tp);
8089
8090         tg3_stop_fw(tp);
8091
8092         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8093
8094         if (tg3_flag(tp, INIT_COMPLETE))
8095                 tg3_abort_hw(tp, 1);
8096
8097         /* Enable MAC control of LPI */
8098         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8099                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8100                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8101                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8102
8103                 tw32_f(TG3_CPMU_EEE_CTRL,
8104                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8105
8106                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8107                       TG3_CPMU_EEEMD_LPI_IN_TX |
8108                       TG3_CPMU_EEEMD_LPI_IN_RX |
8109                       TG3_CPMU_EEEMD_EEE_ENABLE;
8110
8111                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8112                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8113
8114                 if (tg3_flag(tp, ENABLE_APE))
8115                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8116
8117                 tw32_f(TG3_CPMU_EEE_MODE, val);
8118
8119                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8120                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8121                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8122
8123                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8124                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8125                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8126         }
8127
8128         if (reset_phy)
8129                 tg3_phy_reset(tp);
8130
8131         err = tg3_chip_reset(tp);
8132         if (err)
8133                 return err;
8134
8135         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8136
8137         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8138                 val = tr32(TG3_CPMU_CTRL);
8139                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8140                 tw32(TG3_CPMU_CTRL, val);
8141
8142                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8143                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8144                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8145                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8146
8147                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8148                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8149                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8150                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8151
8152                 val = tr32(TG3_CPMU_HST_ACC);
8153                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8154                 val |= CPMU_HST_ACC_MACCLK_6_25;
8155                 tw32(TG3_CPMU_HST_ACC, val);
8156         }
8157
8158         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8159                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8160                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8161                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8162                 tw32(PCIE_PWR_MGMT_THRESH, val);
8163
8164                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8165                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8166
8167                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8168
8169                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8170                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8171         }
8172
8173         if (tg3_flag(tp, L1PLLPD_EN)) {
8174                 u32 grc_mode = tr32(GRC_MODE);
8175
8176                 /* Access the lower 1K of PL PCIE block registers. */
8177                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8178                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8179
8180                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8181                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8182                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8183
8184                 tw32(GRC_MODE, grc_mode);
8185         }
8186
8187         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8188                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8189                         u32 grc_mode = tr32(GRC_MODE);
8190
8191                         /* Access the lower 1K of PL PCIE block registers. */
8192                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8193                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8194
8195                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8196                                    TG3_PCIE_PL_LO_PHYCTL5);
8197                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8198                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8199
8200                         tw32(GRC_MODE, grc_mode);
8201                 }
8202
8203                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8204                         u32 grc_mode = tr32(GRC_MODE);
8205
8206                         /* Access the lower 1K of DL PCIE block registers. */
8207                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8208                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8209
8210                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8211                                    TG3_PCIE_DL_LO_FTSMAX);
8212                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8213                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8214                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8215
8216                         tw32(GRC_MODE, grc_mode);
8217                 }
8218
8219                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8220                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8221                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8222                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8223         }
8224
8225         /* This works around an issue with Athlon chipsets on
8226          * B3 tigon3 silicon.  This bit has no effect on any
8227          * other revision.  But do not set this on PCI Express
8228          * chips and don't even touch the clocks if the CPMU is present.
8229          */
8230         if (!tg3_flag(tp, CPMU_PRESENT)) {
8231                 if (!tg3_flag(tp, PCI_EXPRESS))
8232                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8233                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8234         }
8235
8236         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8237             tg3_flag(tp, PCIX_MODE)) {
8238                 val = tr32(TG3PCI_PCISTATE);
8239                 val |= PCISTATE_RETRY_SAME_DMA;
8240                 tw32(TG3PCI_PCISTATE, val);
8241         }
8242
8243         if (tg3_flag(tp, ENABLE_APE)) {
8244                 /* Allow reads and writes to the
8245                  * APE register and memory space.
8246                  */
8247                 val = tr32(TG3PCI_PCISTATE);
8248                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8249                        PCISTATE_ALLOW_APE_SHMEM_WR |
8250                        PCISTATE_ALLOW_APE_PSPACE_WR;
8251                 tw32(TG3PCI_PCISTATE, val);
8252         }
8253
8254         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8255                 /* Enable some hw fixes.  */
8256                 val = tr32(TG3PCI_MSI_DATA);
8257                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8258                 tw32(TG3PCI_MSI_DATA, val);
8259         }
8260
8261         /* Descriptor ring init may make accesses to the
8262          * NIC SRAM area to setup the TX descriptors, so we
8263          * can only do this after the hardware has been
8264          * successfully reset.
8265          */
8266         err = tg3_init_rings(tp);
8267         if (err)
8268                 return err;
8269
8270         if (tg3_flag(tp, 57765_PLUS)) {
8271                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8272                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8273                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8274                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8275                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8276                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8277                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8278                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8279         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8280                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8281                 /* This value is determined during the probe-time DMA
8282                  * engine test, tg3_test_dma.
8283                  */
8284                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8285         }
8286
8287         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8288                           GRC_MODE_4X_NIC_SEND_RINGS |
8289                           GRC_MODE_NO_TX_PHDR_CSUM |
8290                           GRC_MODE_NO_RX_PHDR_CSUM);
8291         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8292
8293         /* Pseudo-header checksum is done by hardware logic and not
8294          * the offload processors, so make the chip do the pseudo-
8295          * header checksums on receive.  For transmit it is more
8296          * convenient to do the pseudo-header checksum in software
8297          * as Linux does that on transmit for us in all cases.
8298          */
8299         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8300
8301         tw32(GRC_MODE,
8302              tp->grc_mode |
8303              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8304
8305         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8306         val = tr32(GRC_MISC_CFG);
8307         val &= ~0xff;
8308         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
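             /* A prescaler of 65 presumably divides the 66 MHz clock
              * down to a 1 MHz (1us) timer tick.
              */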
8309         tw32(GRC_MISC_CFG, val);
8310
8311         /* Initialize MBUF/DESC pool. */
8312         if (tg3_flag(tp, 5750_PLUS)) {
8313                 /* Do nothing.  */
8314         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8315                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8316                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8317                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8318                 else
8319                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8320                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8321                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8322         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8323                 int fw_len;
8324
8325                 fw_len = tp->fw_len;
8326                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
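                     /* Round the firmware length up to a 128-byte
                      * boundary; the mbuf pool starts right after the
                      * firmware image.
                      */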
8327                 tw32(BUFMGR_MB_POOL_ADDR,
8328                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8329                 tw32(BUFMGR_MB_POOL_SIZE,
8330                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8331         }
8332
8333         if (tp->dev->mtu <= ETH_DATA_LEN) {
8334                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8335                      tp->bufmgr_config.mbuf_read_dma_low_water);
8336                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8337                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8338                 tw32(BUFMGR_MB_HIGH_WATER,
8339                      tp->bufmgr_config.mbuf_high_water);
8340         } else {
8341                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8342                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8343                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8344                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8345                 tw32(BUFMGR_MB_HIGH_WATER,
8346                      tp->bufmgr_config.mbuf_high_water_jumbo);
8347         }
8348         tw32(BUFMGR_DMA_LOW_WATER,
8349              tp->bufmgr_config.dma_low_water);
8350         tw32(BUFMGR_DMA_HIGH_WATER,
8351              tp->bufmgr_config.dma_high_water);
8352
8353         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8354         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8355                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8356         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8357             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8358             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8359                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8360         tw32(BUFMGR_MODE, val);
8361         for (i = 0; i < 2000; i++) {
8362                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8363                         break;
8364                 udelay(10);
8365         }
8366         if (i >= 2000) {
8367                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8368                 return -ENODEV;
8369         }
8370
8371         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8372                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8373
8374         tg3_setup_rxbd_thresholds(tp);
8375
8376         /* Initialize TG3_BDINFO's at:
8377          *  RCVDBDI_STD_BD:     standard eth size rx ring
8378          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8379          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8380          *
8381          * like so:
8382          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8383          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8384          *                              ring attribute flags
8385          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8386          *
8387          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8388          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8389          *
8390          * The size of each ring is fixed in the firmware, but the location is
8391          * configurable.
8392          */
8393         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8394              ((u64) tpr->rx_std_mapping >> 32));
8395         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8396              ((u64) tpr->rx_std_mapping & 0xffffffff));
8397         if (!tg3_flag(tp, 5717_PLUS))
8398                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8399                      NIC_SRAM_RX_BUFFER_DESC);
8400
8401         /* Disable the mini ring */
8402         if (!tg3_flag(tp, 5705_PLUS))
8403                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8404                      BDINFO_FLAGS_DISABLED);
8405
8406         /* Program the jumbo buffer descriptor ring control
8407          * blocks on those devices that have them.
8408          */
8409         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8410             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8411
8412                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8413                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8414                              ((u64) tpr->rx_jmb_mapping >> 32));
8415                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8416                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8417                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8418                               BDINFO_FLAGS_MAXLEN_SHIFT;
8419                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8420                              val | BDINFO_FLAGS_USE_EXT_RECV);
8421                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8422                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8423                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8424                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8425                 } else {
8426                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8427                              BDINFO_FLAGS_DISABLED);
8428                 }
8429
8430                 if (tg3_flag(tp, 57765_PLUS)) {
8431                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8432                                 val = TG3_RX_STD_MAX_SIZE_5700;
8433                         else
8434                                 val = TG3_RX_STD_MAX_SIZE_5717;
8435                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8436                         val |= (TG3_RX_STD_DMA_SZ << 2);
8437                 } else
8438                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8439         } else
8440                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8441
8442         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8443
8444         tpr->rx_std_prod_idx = tp->rx_pending;
8445         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8446
8447         tpr->rx_jmb_prod_idx =
8448                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8449         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8450
8451         tg3_rings_reset(tp);
8452
8453         /* Initialize MAC address and backoff seed. */
8454         __tg3_set_mac_addr(tp, 0);
8455
8456         /* MTU + ethernet header + FCS + optional VLAN tag */
8457         tw32(MAC_RX_MTU_SIZE,
8458              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8459
8460         /* The slot time is changed by tg3_setup_phy if we
8461          * run at gigabit with half duplex.
8462          */
8463         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8464               (6 << TX_LENGTHS_IPG_SHIFT) |
8465               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8466
8467         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8468                 val |= tr32(MAC_TX_LENGTHS) &
8469                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8470                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8471
8472         tw32(MAC_TX_LENGTHS, val);
8473
8474         /* Receive rules. */
8475         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8476         tw32(RCVLPC_CONFIG, 0x0181);
8477
8478         /* Calculate RDMAC_MODE setting early, we need it to determine
8479          * the RCVLPC_STATE_ENABLE mask.
8480          */
8481         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8482                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8483                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8484                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8485                       RDMAC_MODE_LNGREAD_ENAB);
8486
8487         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8488                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8489
8490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8491             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8492             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8493                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8494                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8495                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8496
8497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8498             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8499                 if (tg3_flag(tp, TSO_CAPABLE) &&
8500                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8501                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8502                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8503                            !tg3_flag(tp, IS_5788)) {
8504                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8505                 }
8506         }
8507
8508         if (tg3_flag(tp, PCI_EXPRESS))
8509                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8510
8511         if (tg3_flag(tp, HW_TSO_1) ||
8512             tg3_flag(tp, HW_TSO_2) ||
8513             tg3_flag(tp, HW_TSO_3))
8514                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8515
8516         if (tg3_flag(tp, 57765_PLUS) ||
8517             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8518             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8519                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8520
8521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8522                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8523
8524         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8525             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8526             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8527             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8528             tg3_flag(tp, 57765_PLUS)) {
8529                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8530                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8531                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8532                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8533                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8534                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8535                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8536                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8537                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8538                 }
8539                 tw32(TG3_RDMA_RSRVCTRL_REG,
8540                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8541         }
8542
8543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8545                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8546                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8547                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8548                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8549         }
8550
8551         /* Receive/send statistics. */
8552         if (tg3_flag(tp, 5750_PLUS)) {
8553                 val = tr32(RCVLPC_STATS_ENABLE);
8554                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8555                 tw32(RCVLPC_STATS_ENABLE, val);
8556         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8557                    tg3_flag(tp, TSO_CAPABLE)) {
8558                 val = tr32(RCVLPC_STATS_ENABLE);
8559                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8560                 tw32(RCVLPC_STATS_ENABLE, val);
8561         } else {
8562                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8563         }
8564         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8565         tw32(SNDDATAI_STATSENAB, 0xffffff);
8566         tw32(SNDDATAI_STATSCTRL,
8567              (SNDDATAI_SCTRL_ENABLE |
8568               SNDDATAI_SCTRL_FASTUPD));
8569
8570         /* Setup host coalescing engine. */
8571         tw32(HOSTCC_MODE, 0);
8572         for (i = 0; i < 2000; i++) {
8573                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8574                         break;
8575                 udelay(10);
8576         }
8577
8578         __tg3_set_coalesce(tp, &tp->coal);
8579
8580         if (!tg3_flag(tp, 5705_PLUS)) {
8581                 /* Status/statistics block address.  See tg3_timer,
8582                  * the tg3_periodic_fetch_stats call there, and
8583                  * tg3_get_stats to see how this works for 5705/5750 chips.
8584                  */
8585                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8586                      ((u64) tp->stats_mapping >> 32));
8587                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8588                      ((u64) tp->stats_mapping & 0xffffffff));
8589                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8590
8591                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8592
8593                 /* Clear statistics and status block memory areas */
8594                 for (i = NIC_SRAM_STATS_BLK;
8595                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8596                      i += sizeof(u32)) {
8597                         tg3_write_mem(tp, i, 0);
8598                         udelay(40);
8599                 }
8600         }
8601
8602         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8603
8604         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8605         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8606         if (!tg3_flag(tp, 5705_PLUS))
8607                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8608
8609         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8610                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8611                 /* Reset to prevent intermittently losing the first rx packet. */
8612                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8613                 udelay(10);
8614         }
8615
8616         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8617                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8618                         MAC_MODE_FHDE_ENABLE;
8619         if (tg3_flag(tp, ENABLE_APE))
8620                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8621         if (!tg3_flag(tp, 5705_PLUS) &&
8622             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8623             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8624                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8625         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8626         udelay(40);
8627
8628         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8629          * If TG3_FLAG_IS_NIC is zero, we should read the
8630          * register to preserve the GPIO settings for LOMs. The GPIOs,
8631          * whether used as inputs or outputs, are set by boot code after
8632          * reset.
8633          */
8634         if (!tg3_flag(tp, IS_NIC)) {
8635                 u32 gpio_mask;
8636
8637                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8638                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8639                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8640
8641                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8642                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8643                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8644
8645                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8646                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8647
8648                 tp->grc_local_ctrl &= ~gpio_mask;
8649                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8650
8651                 /* GPIO1 must be driven high for eeprom write protect */
8652                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8653                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8654                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8655         }
8656         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8657         udelay(100);
8658
8659         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8660                 val = tr32(MSGINT_MODE);
8661                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8662                 tw32(MSGINT_MODE, val);
8663         }
8664
8665         if (!tg3_flag(tp, 5705_PLUS)) {
8666                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8667                 udelay(40);
8668         }
8669
8670         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8671                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8672                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8673                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8674                WDMAC_MODE_LNGREAD_ENAB);
8675
8676         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8677             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8678                 if (tg3_flag(tp, TSO_CAPABLE) &&
8679                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8680                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8681                         /* nothing */
8682                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8683                            !tg3_flag(tp, IS_5788)) {
8684                         val |= WDMAC_MODE_RX_ACCEL;
8685                 }
8686         }
8687
8688         /* Enable host coalescing bug fix */
8689         if (tg3_flag(tp, 5755_PLUS))
8690                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8691
8692         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8693                 val |= WDMAC_MODE_BURST_ALL_DATA;
8694
8695         tw32_f(WDMAC_MODE, val);
8696         udelay(40);
8697
8698         if (tg3_flag(tp, PCIX_MODE)) {
8699                 u16 pcix_cmd;
8700
8701                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8702                                      &pcix_cmd);
8703                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8704                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8705                         pcix_cmd |= PCI_X_CMD_READ_2K;
8706                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8707                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8708                         pcix_cmd |= PCI_X_CMD_READ_2K;
8709                 }
8710                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8711                                       pcix_cmd);
8712         }
8713
8714         tw32_f(RDMAC_MODE, rdmac_mode);
8715         udelay(40);
8716
8717         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8718         if (!tg3_flag(tp, 5705_PLUS))
8719                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8720
8721         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8722                 tw32(SNDDATAC_MODE,
8723                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8724         else
8725                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8726
8727         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8728         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8729         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8730         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8731                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8732         tw32(RCVDBDI_MODE, val);
8733         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8734         if (tg3_flag(tp, HW_TSO_1) ||
8735             tg3_flag(tp, HW_TSO_2) ||
8736             tg3_flag(tp, HW_TSO_3))
8737                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8738         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8739         if (tg3_flag(tp, ENABLE_TSS))
8740                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8741         tw32(SNDBDI_MODE, val);
8742         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8743
8744         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8745                 err = tg3_load_5701_a0_firmware_fix(tp);
8746                 if (err)
8747                         return err;
8748         }
8749
8750         if (tg3_flag(tp, TSO_CAPABLE)) {
8751                 err = tg3_load_tso_firmware(tp);
8752                 if (err)
8753                         return err;
8754         }
8755
8756         tp->tx_mode = TX_MODE_ENABLE;
8757
8758         if (tg3_flag(tp, 5755_PLUS) ||
8759             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8760                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8761
8762         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8763                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8764                 tp->tx_mode &= ~val;
8765                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8766         }
8767
8768         tw32_f(MAC_TX_MODE, tp->tx_mode);
8769         udelay(100);
8770
8771         if (tg3_flag(tp, ENABLE_RSS)) {
8772                 int i = 0;
8773                 u32 reg = MAC_RSS_INDIR_TBL_0;
8774
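                     /* Each 32-bit indirection table register packs eight
                      * 4-bit rx queue indices.  With irq_cnt == 2 (one vector
                      * for link events plus a single rx vector) the table is
                      * simply zeroed; otherwise the entries cycle over the
                      * rx queues as i % (irq_cnt - 1).
                      */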
8775                 if (tp->irq_cnt == 2) {
8776                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8777                                 tw32(reg, 0x0);
8778                                 reg += 4;
8779                         }
8780                 } else {
8781                         u32 val;
8782
8783                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8784                                 val = i % (tp->irq_cnt - 1);
8785                                 i++;
8786                                 for (; i % 8; i++) {
8787                                         val <<= 4;
8788                                         val |= (i % (tp->irq_cnt - 1));
8789                                 }
8790                                 tw32(reg, val);
8791                                 reg += 4;
8792                         }
8793                 }
8794
8795                 /* Setup the "secret" hash key. */
8796                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8797                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8798                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8799                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8800                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8801                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8802                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8803                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8804                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8805                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8806         }
8807
8808         tp->rx_mode = RX_MODE_ENABLE;
8809         if (tg3_flag(tp, 5755_PLUS))
8810                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8811
8812         if (tg3_flag(tp, ENABLE_RSS))
8813                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8814                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8815                                RX_MODE_RSS_IPV6_HASH_EN |
8816                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8817                                RX_MODE_RSS_IPV4_HASH_EN |
8818                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8819
8820         tw32_f(MAC_RX_MODE, tp->rx_mode);
8821         udelay(10);
8822
8823         tw32(MAC_LED_CTRL, tp->led_ctrl);
8824
8825         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8826         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8827                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8828                 udelay(10);
8829         }
8830         tw32_f(MAC_RX_MODE, tp->rx_mode);
8831         udelay(10);
8832
8833         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8834                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8835                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8836                         /* Set drive transmission level to 1.2V  */
8837                         /* only if the signal pre-emphasis bit is not set  */
8838                         val = tr32(MAC_SERDES_CFG);
8839                         val &= 0xfffff000;
8840                         val |= 0x880;
8841                         tw32(MAC_SERDES_CFG, val);
8842                 }
8843                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8844                         tw32(MAC_SERDES_CFG, 0x616000);
8845         }
8846
8847         /* Prevent chip from dropping frames when flow control
8848          * is enabled.
8849          */
8850         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8851                 val = 1;
8852         else
8853                 val = 2;
8854         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8855
8856         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8857             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8858                 /* Use hardware link auto-negotiation */
8859                 tg3_flag_set(tp, HW_AUTONEG);
8860         }
8861
8862         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8863             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8864                 u32 tmp;
8865
8866                 tmp = tr32(SERDES_RX_CTRL);
8867                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8868                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8869                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8870                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8871         }
8872
8873         if (!tg3_flag(tp, USE_PHYLIB)) {
8874                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8875                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8876                         tp->link_config.speed = tp->link_config.orig_speed;
8877                         tp->link_config.duplex = tp->link_config.orig_duplex;
8878                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8879                 }
8880
8881                 err = tg3_setup_phy(tp, 0);
8882                 if (err)
8883                         return err;
8884
8885                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8886                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8887                         u32 tmp;
8888
8889                         /* Clear CRC stats. */
8890                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8891                                 tg3_writephy(tp, MII_TG3_TEST1,
8892                                              tmp | MII_TG3_TEST1_CRC_EN);
8893                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8894                         }
8895                 }
8896         }
8897
8898         __tg3_set_rx_mode(tp->dev);
8899
8900         /* Initialize receive rules. */
8901         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8902         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8903         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8904         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8905
8906         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8907                 limit = 8;
8908         else
8909                 limit = 16;
8910         if (tg3_flag(tp, ENABLE_ASF))
8911                 limit -= 4;
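             /* Deliberate fall-through: each case below clears one
              * rule/value pair, so entering at 'limit' clears every rule
              * from limit - 1 down to rule 4.  Rules 0 and 1 were programmed
              * above; the commented-out rules 2 and 3 are presumably
              * reserved.
              */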
8912         switch (limit) {
8913         case 16:
8914                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8915         case 15:
8916                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8917         case 14:
8918                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8919         case 13:
8920                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8921         case 12:
8922                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8923         case 11:
8924                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8925         case 10:
8926                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8927         case 9:
8928                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8929         case 8:
8930                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8931         case 7:
8932                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8933         case 6:
8934                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8935         case 5:
8936                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8937         case 4:
8938                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8939         case 3:
8940                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8941         case 2:
8942         case 1:
8943
8944         default:
8945                 break;
8946         }
8947
8948         if (tg3_flag(tp, ENABLE_APE))
8949                 /* Tell the APE our heartbeat update interval (disabled here). */
8950                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8951                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8952
8953         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8954
8955         return 0;
8956 }
8957
8958 /* Called at device open time to get the chip ready for
8959  * packet processing.  Invoked with tp->lock held.
8960  */
8961 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8962 {
8963         tg3_switch_clocks(tp);
8964
8965         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8966
8967         return tg3_reset_hw(tp, reset_phy);
8968 }
8969
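     /* Fold a 32-bit hardware counter into a 64-bit software accumulator.
      * If the 32-bit addition wrapped, the new low word is smaller than the
      * value just added, so a carry is propagated into the high word.
      */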
8970 #define TG3_STAT_ADD32(PSTAT, REG) \
8971 do {    u32 __val = tr32(REG); \
8972         (PSTAT)->low += __val; \
8973         if ((PSTAT)->low < __val) \
8974                 (PSTAT)->high += 1; \
8975 } while (0)
8976
8977 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8978 {
8979         struct tg3_hw_stats *sp = tp->hw_stats;
8980
8981         if (!netif_carrier_ok(tp->dev))
8982                 return;
8983
8984         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8985         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8986         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8987         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8988         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8989         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8990         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8991         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8992         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8993         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8994         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8995         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8996         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8997
8998         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8999         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9000         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9001         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9002         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9003         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9004         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9005         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9006         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9007         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9008         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9009         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9010         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9011         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9012
9013         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9014         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9015             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9016             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9017                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9018         } else {
9019                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9020                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9021                 if (val) {
9022                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9023                         sp->rx_discards.low += val;
9024                         if (sp->rx_discards.low < val)
9025                                 sp->rx_discards.high += 1;
9026                 }
9027                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9028         }
9029         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9030 }
9031
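     /* Detect a lost MSI: if a vector has work pending but neither its rx
      * nor its tx consumer index has moved since the previous timer tick,
      * allow one grace period and then rewrite the interrupt mailbox with
      * the last status tag to retrigger processing.
      */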
9032 static void tg3_chk_missed_msi(struct tg3 *tp)
9033 {
9034         u32 i;
9035
9036         for (i = 0; i < tp->irq_cnt; i++) {
9037                 struct tg3_napi *tnapi = &tp->napi[i];
9038
9039                 if (tg3_has_work(tnapi)) {
9040                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9041                             tnapi->last_tx_cons == tnapi->tx_cons) {
9042                                 if (tnapi->chk_msi_cnt < 1) {
9043                                         tnapi->chk_msi_cnt++;
9044                                         return;
9045                                 }
9046                                 tw32_mailbox(tnapi->int_mbox,
9047                                              tnapi->last_tag << 24);
9048                         }
9049                 }
9050                 tnapi->chk_msi_cnt = 0;
9051                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9052                 tnapi->last_tx_cons = tnapi->tx_cons;
9053         }
9054 }
9055
9056 static void tg3_timer(unsigned long __opaque)
9057 {
9058         struct tg3 *tp = (struct tg3 *) __opaque;
9059
9060         if (tp->irq_sync)
9061                 goto restart_timer;
9062
9063         spin_lock(&tp->lock);
9064
9065         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9066             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9067                 tg3_chk_missed_msi(tp);
9068
9069         if (!tg3_flag(tp, TAGGED_STATUS)) {
9070                 /* All of this garbage is because, when using non-tagged
9071                  * IRQ status, the mailbox/status_block protocol the chip
9072                  * uses with the CPU is race prone.
9073                  */
9074                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9075                         tw32(GRC_LOCAL_CTRL,
9076                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9077                 } else {
9078                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9079                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9080                 }
9081
9082                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9083                         tg3_flag_set(tp, RESTART_TIMER);
9084                         spin_unlock(&tp->lock);
9085                         schedule_work(&tp->reset_task);
9086                         return;
9087                 }
9088         }
9089
9090         /* This part only runs once per second. */
9091         if (!--tp->timer_counter) {
9092                 if (tg3_flag(tp, 5705_PLUS))
9093                         tg3_periodic_fetch_stats(tp);
9094
9095                 if (tp->setlpicnt && !--tp->setlpicnt)
9096                         tg3_phy_eee_enable(tp);
9097
9098                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9099                         u32 mac_stat;
9100                         int phy_event;
9101
9102                         mac_stat = tr32(MAC_STATUS);
9103
9104                         phy_event = 0;
9105                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9106                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9107                                         phy_event = 1;
9108                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9109                                 phy_event = 1;
9110
9111                         if (phy_event)
9112                                 tg3_setup_phy(tp, 0);
9113                 } else if (tg3_flag(tp, POLL_SERDES)) {
9114                         u32 mac_stat = tr32(MAC_STATUS);
9115                         int need_setup = 0;
9116
9117                         if (netif_carrier_ok(tp->dev) &&
9118                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9119                                 need_setup = 1;
9120                         }
9121                         if (!netif_carrier_ok(tp->dev) &&
9122                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9123                                          MAC_STATUS_SIGNAL_DET))) {
9124                                 need_setup = 1;
9125                         }
9126                         if (need_setup) {
9127                                 if (!tp->serdes_counter) {
9128                                         tw32_f(MAC_MODE,
9129                                              (tp->mac_mode &
9130                                               ~MAC_MODE_PORT_MODE_MASK));
9131                                         udelay(40);
9132                                         tw32_f(MAC_MODE, tp->mac_mode);
9133                                         udelay(40);
9134                                 }
9135                                 tg3_setup_phy(tp, 0);
9136                         }
9137                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9138                            tg3_flag(tp, 5780_CLASS)) {
9139                         tg3_serdes_parallel_detect(tp);
9140                 }
9141
9142                 tp->timer_counter = tp->timer_multiplier;
9143         }
9144
9145         /* Heartbeat is only sent once every 2 seconds.
9146          *
9147          * The heartbeat is to tell the ASF firmware that the host
9148          * driver is still alive.  In the event that the OS crashes,
9149          * ASF needs to reset the hardware to free up the FIFO space
9150          * that may be filled with rx packets destined for the host.
9151          * If the FIFO is full, ASF will no longer function properly.
9152          *
9153          * Unintended resets have been reported on real-time kernels,
9154          * where the timer doesn't run on time.  Netpoll will have the
9155          * same problem.
9156          *
9157          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9158          * to check the ring condition when the heartbeat is expiring
9159          * before doing the reset.  This will prevent most unintended
9160          * resets.
9161          */
9162         if (!--tp->asf_counter) {
9163                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9164                         tg3_wait_for_event_ack(tp);
9165
9166                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9167                                       FWCMD_NICDRV_ALIVE3);
9168                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9169                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9170                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9171
9172                         tg3_generate_fw_event(tp);
9173                 }
9174                 tp->asf_counter = tp->asf_multiplier;
9175         }
9176
9177         spin_unlock(&tp->lock);
9178
9179 restart_timer:
9180         tp->timer.expires = jiffies + tp->timer_offset;
9181         add_timer(&tp->timer);
9182 }
9183
9184 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9185 {
9186         irq_handler_t fn;
9187         unsigned long flags;
9188         char *name;
9189         struct tg3_napi *tnapi = &tp->napi[irq_num];
9190
9191         if (tp->irq_cnt == 1)
9192                 name = tp->dev->name;
9193         else {
9194                 name = &tnapi->irq_lbl[0];
9195                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9196                 name[IFNAMSIZ-1] = 0;
9197         }
9198
9199         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9200                 fn = tg3_msi;
9201                 if (tg3_flag(tp, 1SHOT_MSI))
9202                         fn = tg3_msi_1shot;
9203                 flags = 0;
9204         } else {
9205                 fn = tg3_interrupt;
9206                 if (tg3_flag(tp, TAGGED_STATUS))
9207                         fn = tg3_interrupt_tagged;
9208                 flags = IRQF_SHARED;
9209         }
9210
9211         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9212 }
9213
9214 static int tg3_test_interrupt(struct tg3 *tp)
9215 {
9216         struct tg3_napi *tnapi = &tp->napi[0];
9217         struct net_device *dev = tp->dev;
9218         int err, i, intr_ok = 0;
9219         u32 val;
9220
9221         if (!netif_running(dev))
9222                 return -ENODEV;
9223
9224         tg3_disable_ints(tp);
9225
9226         free_irq(tnapi->irq_vec, tnapi);
9227
9228         /*
9229          * Turn off MSI one shot mode.  Otherwise this test has no
9230          * observable way to know whether the interrupt was delivered.
9231          */
9232         if (tg3_flag(tp, 57765_PLUS)) {
9233                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9234                 tw32(MSGINT_MODE, val);
9235         }
9236
9237         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9238                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9239         if (err)
9240                 return err;
9241
9242         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9243         tg3_enable_ints(tp);
9244
9245         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9246                tnapi->coal_now);
9247
9248         for (i = 0; i < 5; i++) {
9249                 u32 int_mbox, misc_host_ctrl;
9250
9251                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9252                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9253
9254                 if ((int_mbox != 0) ||
9255                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9256                         intr_ok = 1;
9257                         break;
9258                 }
9259
9260                 if (tg3_flag(tp, 57765_PLUS) &&
9261                     tnapi->hw_status->status_tag != tnapi->last_tag)
9262                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9263
9264                 msleep(10);
9265         }
9266
9267         tg3_disable_ints(tp);
9268
9269         free_irq(tnapi->irq_vec, tnapi);
9270
9271         err = tg3_request_irq(tp, 0);
9272
9273         if (err)
9274                 return err;
9275
9276         if (intr_ok) {
9277                 /* Reenable MSI one shot mode. */
9278                 if (tg3_flag(tp, 57765_PLUS)) {
9279                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9280                         tw32(MSGINT_MODE, val);
9281                 }
9282                 return 0;
9283         }
9284
9285         return -EIO;
9286 }
9287
9288 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
9289  * INTx mode is successfully restored.
9290  */
9291 static int tg3_test_msi(struct tg3 *tp)
9292 {
9293         int err;
9294         u16 pci_cmd;
9295
9296         if (!tg3_flag(tp, USING_MSI))
9297                 return 0;
9298
9299         /* Turn off SERR reporting in case MSI terminates with Master
9300          * Abort.
9301          */
9302         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9303         pci_write_config_word(tp->pdev, PCI_COMMAND,
9304                               pci_cmd & ~PCI_COMMAND_SERR);
9305
9306         err = tg3_test_interrupt(tp);
9307
9308         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9309
9310         if (!err)
9311                 return 0;
9312
9313         /* other failures */
9314         if (err != -EIO)
9315                 return err;
9316
9317         /* MSI test failed, go back to INTx mode */
9318         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9319                     "to INTx mode. Please report this failure to the PCI "
9320                     "maintainer and include system chipset information\n");
9321
9322         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9323
9324         pci_disable_msi(tp->pdev);
9325
9326         tg3_flag_clear(tp, USING_MSI);
9327         tp->napi[0].irq_vec = tp->pdev->irq;
9328
9329         err = tg3_request_irq(tp, 0);
9330         if (err)
9331                 return err;
9332
9333         /* Need to reset the chip because the MSI cycle may have terminated
9334          * with Master Abort.
9335          */
9336         tg3_full_lock(tp, 1);
9337
9338         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9339         err = tg3_init_hw(tp, 1);
9340
9341         tg3_full_unlock(tp);
9342
9343         if (err)
9344                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9345
9346         return err;
9347 }
9348
9349 static int tg3_request_firmware(struct tg3 *tp)
9350 {
9351         const __be32 *fw_data;
9352
9353         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9354                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9355                            tp->fw_needed);
9356                 return -ENOENT;
9357         }
9358
9359         fw_data = (void *)tp->fw->data;
9360
9361         /* Firmware blob starts with version numbers, followed by
9362          * start address and _full_ length including BSS sections
9363          * (which must be longer than the actual data, of course).
9364          */
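             /* Inferred from the 12-byte offset used below, the header is
              * three big-endian words:
              *   fw_data[0]  version
              *   fw_data[1]  start address
              *   fw_data[2]  full image length, including BSS
              * followed by tp->fw->size - 12 bytes of text/data.
              */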
9365
9366         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9367         if (tp->fw_len < (tp->fw->size - 12)) {
9368                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9369                            tp->fw_len, tp->fw_needed);
9370                 release_firmware(tp->fw);
9371                 tp->fw = NULL;
9372                 return -EINVAL;
9373         }
9374
9375         /* We no longer need firmware; we have it. */
9376         tp->fw_needed = NULL;
9377         return 0;
9378 }
9379
9380 static bool tg3_enable_msix(struct tg3 *tp)
9381 {
9382         int i, rc, cpus = num_online_cpus();
9383         struct msix_entry msix_ent[tp->irq_max];
9384
9385         if (cpus == 1)
9386                 /* Just fall back to the simpler MSI mode. */
9387                 return false;
9388
9389         /*
9390          * We want as many rx rings enabled as there are cpus.
9391          * The first MSIX vector only deals with link interrupts, etc,
9392          * so we add one to the number of vectors we are requesting.
9393          */
9394         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9395
9396         for (i = 0; i < tp->irq_max; i++) {
9397                 msix_ent[i].entry  = i;
9398                 msix_ent[i].vector = 0;
9399         }
9400
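             /* A positive return from pci_enable_msix() is the number of
              * vectors that could actually be allocated, so retry the
              * request with that count.
              */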
9401         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9402         if (rc < 0) {
9403                 return false;
9404         } else if (rc != 0) {
9405                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9406                         return false;
9407                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9408                               tp->irq_cnt, rc);
9409                 tp->irq_cnt = rc;
9410         }
9411
9412         for (i = 0; i < tp->irq_max; i++)
9413                 tp->napi[i].irq_vec = msix_ent[i].vector;
9414
9415         netif_set_real_num_tx_queues(tp->dev, 1);
9416         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9417         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9418                 pci_disable_msix(tp->pdev);
9419                 return false;
9420         }
9421
9422         if (tp->irq_cnt > 1) {
9423                 tg3_flag_set(tp, ENABLE_RSS);
9424
9425                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9426                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9427                         tg3_flag_set(tp, ENABLE_TSS);
9428                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9429                 }
9430         }
9431
9432         return true;
9433 }
9434
9435 static void tg3_ints_init(struct tg3 *tp)
9436 {
9437         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9438             !tg3_flag(tp, TAGGED_STATUS)) {
9439                 /* All MSI supporting chips should support tagged
9440                  * status.  Warn and fall back to INTx otherwise.
9441                  */
9442                 netdev_warn(tp->dev,
9443                             "MSI without TAGGED_STATUS? Not using MSI\n");
9444                 goto defcfg;
9445         }
9446
9447         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9448                 tg3_flag_set(tp, USING_MSIX);
9449         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9450                 tg3_flag_set(tp, USING_MSI);
9451
9452         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9453                 u32 msi_mode = tr32(MSGINT_MODE);
9454                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9455                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9456                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9457         }
9458 defcfg:
9459         if (!tg3_flag(tp, USING_MSIX)) {
9460                 tp->irq_cnt = 1;
9461                 tp->napi[0].irq_vec = tp->pdev->irq;
9462                 netif_set_real_num_tx_queues(tp->dev, 1);
9463                 netif_set_real_num_rx_queues(tp->dev, 1);
9464         }
9465 }
9466
9467 static void tg3_ints_fini(struct tg3 *tp)
9468 {
9469         if (tg3_flag(tp, USING_MSIX))
9470                 pci_disable_msix(tp->pdev);
9471         else if (tg3_flag(tp, USING_MSI))
9472                 pci_disable_msi(tp->pdev);
9473         tg3_flag_clear(tp, USING_MSI);
9474         tg3_flag_clear(tp, USING_MSIX);
9475         tg3_flag_clear(tp, ENABLE_RSS);
9476         tg3_flag_clear(tp, ENABLE_TSS);
9477 }
9478
9479 static int tg3_open(struct net_device *dev)
9480 {
9481         struct tg3 *tp = netdev_priv(dev);
9482         int i, err;
9483
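             /* The 5701 A0 cannot run without its fixup firmware, so a load
              * failure is fatal there.  On all other chips the firmware only
              * provides TSO, which can simply be disabled instead.
              */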
9484         if (tp->fw_needed) {
9485                 err = tg3_request_firmware(tp);
9486                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9487                         if (err)
9488                                 return err;
9489                 } else if (err) {
9490                         netdev_warn(tp->dev, "TSO capability disabled\n");
9491                         tg3_flag_clear(tp, TSO_CAPABLE);
9492                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9493                         netdev_notice(tp->dev, "TSO capability restored\n");
9494                         tg3_flag_set(tp, TSO_CAPABLE);
9495                 }
9496         }
9497
9498         netif_carrier_off(tp->dev);
9499
9500         err = tg3_power_up(tp);
9501         if (err)
9502                 return err;
9503
9504         tg3_full_lock(tp, 0);
9505
9506         tg3_disable_ints(tp);
9507         tg3_flag_clear(tp, INIT_COMPLETE);
9508
9509         tg3_full_unlock(tp);
9510
9511         /*
9512          * Set up interrupts first so we know how
9513          * many NAPI resources to allocate.
9514          */
9515         tg3_ints_init(tp);
9516
9517         /* The placement of this call is tied
9518          * to the setup and use of Host TX descriptors.
9519          */
9520         err = tg3_alloc_consistent(tp);
9521         if (err)
9522                 goto err_out1;
9523
9524         tg3_napi_init(tp);
9525
9526         tg3_napi_enable(tp);
9527
9528         for (i = 0; i < tp->irq_cnt; i++) {
9529                 struct tg3_napi *tnapi = &tp->napi[i];
9530                 err = tg3_request_irq(tp, i);
9531                 if (err) {
9532                         for (i--; i >= 0; i--)
9533                                 free_irq(tnapi->irq_vec, tnapi);
9534                         break;
9535                 }
9536         }
9537
9538         if (err)
9539                 goto err_out2;
9540
9541         tg3_full_lock(tp, 0);
9542
9543         err = tg3_init_hw(tp, 1);
9544         if (err) {
9545                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9546                 tg3_free_rings(tp);
9547         } else {
9548                 if (tg3_flag(tp, TAGGED_STATUS) &&
9549                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9550                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9551                         tp->timer_offset = HZ;
9552                 else
9553                         tp->timer_offset = HZ / 10;
9554
9555                 BUG_ON(tp->timer_offset > HZ);
9556                 tp->timer_counter = tp->timer_multiplier =
9557                         (HZ / tp->timer_offset);
9558                 tp->asf_counter = tp->asf_multiplier =
9559                         ((HZ / tp->timer_offset) * 2);
9560
9561                 init_timer(&tp->timer);
9562                 tp->timer.expires = jiffies + tp->timer_offset;
9563                 tp->timer.data = (unsigned long) tp;
9564                 tp->timer.function = tg3_timer;
9565         }
9566
9567         tg3_full_unlock(tp);
9568
9569         if (err)
9570                 goto err_out3;
9571
9572         if (tg3_flag(tp, USING_MSI)) {
9573                 err = tg3_test_msi(tp);
9574
9575                 if (err) {
9576                         tg3_full_lock(tp, 0);
9577                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9578                         tg3_free_rings(tp);
9579                         tg3_full_unlock(tp);
9580
9581                         goto err_out2;
9582                 }
9583
9584                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9585                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9586
9587                         tw32(PCIE_TRANSACTION_CFG,
9588                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9589                 }
9590         }
9591
9592         tg3_phy_start(tp);
9593
9594         tg3_full_lock(tp, 0);
9595
9596         add_timer(&tp->timer);
9597         tg3_flag_set(tp, INIT_COMPLETE);
9598         tg3_enable_ints(tp);
9599
9600         tg3_full_unlock(tp);
9601
9602         netif_tx_start_all_queues(dev);
9603
9604         /*
9605          * Reset the loopback feature if it was turned on while the
9606          * device was down; make sure it is installed properly now.
9607          */
9608         if (dev->features & NETIF_F_LOOPBACK)
9609                 tg3_set_loopback(dev, dev->features);
9610
9611         return 0;
9612
9613 err_out3:
9614         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9615                 struct tg3_napi *tnapi = &tp->napi[i];
9616                 free_irq(tnapi->irq_vec, tnapi);
9617         }
9618
9619 err_out2:
9620         tg3_napi_disable(tp);
9621         tg3_napi_fini(tp);
9622         tg3_free_consistent(tp);
9623
9624 err_out1:
9625         tg3_ints_fini(tp);
9626         tg3_frob_aux_power(tp, false);
9627         pci_set_power_state(tp->pdev, PCI_D3hot);
9628         return err;
9629 }
9630
9631 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9632                                                  struct rtnl_link_stats64 *);
9633 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9634
9635 static int tg3_close(struct net_device *dev)
9636 {
9637         int i;
9638         struct tg3 *tp = netdev_priv(dev);
9639
9640         tg3_napi_disable(tp);
9641         cancel_work_sync(&tp->reset_task);
9642
9643         netif_tx_stop_all_queues(dev);
9644
9645         del_timer_sync(&tp->timer);
9646
9647         tg3_phy_stop(tp);
9648
9649         tg3_full_lock(tp, 1);
9650
9651         tg3_disable_ints(tp);
9652
9653         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9654         tg3_free_rings(tp);
9655         tg3_flag_clear(tp, INIT_COMPLETE);
9656
9657         tg3_full_unlock(tp);
9658
9659         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9660                 struct tg3_napi *tnapi = &tp->napi[i];
9661                 free_irq(tnapi->irq_vec, tnapi);
9662         }
9663
9664         tg3_ints_fini(tp);
9665
9666         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9667
9668         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9669                sizeof(tp->estats_prev));
9670
9671         tg3_napi_fini(tp);
9672
9673         tg3_free_consistent(tp);
9674
9675         tg3_power_down(tp);
9676
9677         netif_carrier_off(tp->dev);
9678
9679         return 0;
9680 }
9681
9682 static inline u64 get_stat64(tg3_stat64_t *val)
9683 {
9684        return ((u64)val->high << 32) | ((u64)val->low);
9685 }
9686
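     /* On 5700/5701 with a copper PHY, FCS errors are counted by the PHY
      * itself: enable the CRC counter via MII_TG3_TEST1 and accumulate the
      * (apparently clear-on-read) MII_TG3_RXR_COUNTERS value.  All other
      * chips report the count in the hardware statistics block.
      */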
9687 static u64 calc_crc_errors(struct tg3 *tp)
9688 {
9689         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9690
9691         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9692             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9693              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9694                 u32 val;
9695
9696                 spin_lock_bh(&tp->lock);
9697                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9698                         tg3_writephy(tp, MII_TG3_TEST1,
9699                                      val | MII_TG3_TEST1_CRC_EN);
9700                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9701                 } else
9702                         val = 0;
9703                 spin_unlock_bh(&tp->lock);
9704
9705                 tp->phy_crc_errors += val;
9706
9707                 return tp->phy_crc_errors;
9708         }
9709
9710         return get_stat64(&hw_stats->rx_fcs_errors);
9711 }
9712
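/* Fold the live hardware counter for @member on top of the totals saved
 * at the last close, e.g. ESTAT_ADD(rx_octets) expands to
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_stat64(&hw_stats->rx_octets);
 */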
9713 #define ESTAT_ADD(member) \
9714         estats->member =        old_estats->member + \
9715                                 get_stat64(&hw_stats->member)
9716
9717 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9718 {
9719         struct tg3_ethtool_stats *estats = &tp->estats;
9720         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9721         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9722
9723         if (!hw_stats)
9724                 return old_estats;
9725
9726         ESTAT_ADD(rx_octets);
9727         ESTAT_ADD(rx_fragments);
9728         ESTAT_ADD(rx_ucast_packets);
9729         ESTAT_ADD(rx_mcast_packets);
9730         ESTAT_ADD(rx_bcast_packets);
9731         ESTAT_ADD(rx_fcs_errors);
9732         ESTAT_ADD(rx_align_errors);
9733         ESTAT_ADD(rx_xon_pause_rcvd);
9734         ESTAT_ADD(rx_xoff_pause_rcvd);
9735         ESTAT_ADD(rx_mac_ctrl_rcvd);
9736         ESTAT_ADD(rx_xoff_entered);
9737         ESTAT_ADD(rx_frame_too_long_errors);
9738         ESTAT_ADD(rx_jabbers);
9739         ESTAT_ADD(rx_undersize_packets);
9740         ESTAT_ADD(rx_in_length_errors);
9741         ESTAT_ADD(rx_out_length_errors);
9742         ESTAT_ADD(rx_64_or_less_octet_packets);
9743         ESTAT_ADD(rx_65_to_127_octet_packets);
9744         ESTAT_ADD(rx_128_to_255_octet_packets);
9745         ESTAT_ADD(rx_256_to_511_octet_packets);
9746         ESTAT_ADD(rx_512_to_1023_octet_packets);
9747         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9748         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9749         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9750         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9751         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9752
9753         ESTAT_ADD(tx_octets);
9754         ESTAT_ADD(tx_collisions);
9755         ESTAT_ADD(tx_xon_sent);
9756         ESTAT_ADD(tx_xoff_sent);
9757         ESTAT_ADD(tx_flow_control);
9758         ESTAT_ADD(tx_mac_errors);
9759         ESTAT_ADD(tx_single_collisions);
9760         ESTAT_ADD(tx_mult_collisions);
9761         ESTAT_ADD(tx_deferred);
9762         ESTAT_ADD(tx_excessive_collisions);
9763         ESTAT_ADD(tx_late_collisions);
9764         ESTAT_ADD(tx_collide_2times);
9765         ESTAT_ADD(tx_collide_3times);
9766         ESTAT_ADD(tx_collide_4times);
9767         ESTAT_ADD(tx_collide_5times);
9768         ESTAT_ADD(tx_collide_6times);
9769         ESTAT_ADD(tx_collide_7times);
9770         ESTAT_ADD(tx_collide_8times);
9771         ESTAT_ADD(tx_collide_9times);
9772         ESTAT_ADD(tx_collide_10times);
9773         ESTAT_ADD(tx_collide_11times);
9774         ESTAT_ADD(tx_collide_12times);
9775         ESTAT_ADD(tx_collide_13times);
9776         ESTAT_ADD(tx_collide_14times);
9777         ESTAT_ADD(tx_collide_15times);
9778         ESTAT_ADD(tx_ucast_packets);
9779         ESTAT_ADD(tx_mcast_packets);
9780         ESTAT_ADD(tx_bcast_packets);
9781         ESTAT_ADD(tx_carrier_sense_errors);
9782         ESTAT_ADD(tx_discards);
9783         ESTAT_ADD(tx_errors);
9784
9785         ESTAT_ADD(dma_writeq_full);
9786         ESTAT_ADD(dma_write_prioq_full);
9787         ESTAT_ADD(rxbds_empty);
9788         ESTAT_ADD(rx_discards);
9789         ESTAT_ADD(rx_errors);
9790         ESTAT_ADD(rx_threshold_hit);
9791
9792         ESTAT_ADD(dma_readq_full);
9793         ESTAT_ADD(dma_read_prioq_full);
9794         ESTAT_ADD(tx_comp_queue_full);
9795
9796         ESTAT_ADD(ring_set_send_prod_index);
9797         ESTAT_ADD(ring_status_update);
9798         ESTAT_ADD(nic_irqs);
9799         ESTAT_ADD(nic_avoided_irqs);
9800         ESTAT_ADD(nic_tx_threshold_hit);
9801
9802         ESTAT_ADD(mbuf_lwm_thresh_hit);
9803
9804         return estats;
9805 }
9806
9807 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9808                                                  struct rtnl_link_stats64 *stats)
9809 {
9810         struct tg3 *tp = netdev_priv(dev);
9811         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9812         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9813
9814         if (!hw_stats)
9815                 return old_stats;
9816
9817         stats->rx_packets = old_stats->rx_packets +
9818                 get_stat64(&hw_stats->rx_ucast_packets) +
9819                 get_stat64(&hw_stats->rx_mcast_packets) +
9820                 get_stat64(&hw_stats->rx_bcast_packets);
9821
9822         stats->tx_packets = old_stats->tx_packets +
9823                 get_stat64(&hw_stats->tx_ucast_packets) +
9824                 get_stat64(&hw_stats->tx_mcast_packets) +
9825                 get_stat64(&hw_stats->tx_bcast_packets);
9826
9827         stats->rx_bytes = old_stats->rx_bytes +
9828                 get_stat64(&hw_stats->rx_octets);
9829         stats->tx_bytes = old_stats->tx_bytes +
9830                 get_stat64(&hw_stats->tx_octets);
9831
9832         stats->rx_errors = old_stats->rx_errors +
9833                 get_stat64(&hw_stats->rx_errors);
9834         stats->tx_errors = old_stats->tx_errors +
9835                 get_stat64(&hw_stats->tx_errors) +
9836                 get_stat64(&hw_stats->tx_mac_errors) +
9837                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9838                 get_stat64(&hw_stats->tx_discards);
9839
9840         stats->multicast = old_stats->multicast +
9841                 get_stat64(&hw_stats->rx_mcast_packets);
9842         stats->collisions = old_stats->collisions +
9843                 get_stat64(&hw_stats->tx_collisions);
9844
9845         stats->rx_length_errors = old_stats->rx_length_errors +
9846                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9847                 get_stat64(&hw_stats->rx_undersize_packets);
9848
9849         stats->rx_over_errors = old_stats->rx_over_errors +
9850                 get_stat64(&hw_stats->rxbds_empty);
9851         stats->rx_frame_errors = old_stats->rx_frame_errors +
9852                 get_stat64(&hw_stats->rx_align_errors);
9853         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9854                 get_stat64(&hw_stats->tx_discards);
9855         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9856                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9857
9858         stats->rx_crc_errors = old_stats->rx_crc_errors +
9859                 calc_crc_errors(tp);
9860
9861         stats->rx_missed_errors = old_stats->rx_missed_errors +
9862                 get_stat64(&hw_stats->rx_discards);
9863
9864         stats->rx_dropped = tp->rx_dropped;
9865
9866         return stats;
9867 }
9868
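/* Bit-serial CRC-32 over buf (reflected IEEE 802.3 polynomial
 * 0xedb88320); used below to build the multicast hash filter.
 */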
9869 static inline u32 calc_crc(unsigned char *buf, int len)
9870 {
9871         u32 reg;
9872         u32 tmp;
9873         int j, k;
9874
9875         reg = 0xffffffff;
9876
9877         for (j = 0; j < len; j++) {
9878                 reg ^= buf[j];
9879
9880                 for (k = 0; k < 8; k++) {
9881                         tmp = reg & 0x01;
9882
9883                         reg >>= 1;
9884
9885                         if (tmp)
9886                                 reg ^= 0xedb88320;
9887                 }
9888         }
9889
9890         return ~reg;
9891 }
9892
9893 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9894 {
9895         /* accept or reject all multicast frames */
9896         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9897         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9898         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9899         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9900 }
9901
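/* Program promiscuous/multicast filtering from dev->flags and the
 * multicast list; called with tg3_full_lock() held.
 */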
9902 static void __tg3_set_rx_mode(struct net_device *dev)
9903 {
9904         struct tg3 *tp = netdev_priv(dev);
9905         u32 rx_mode;
9906
9907         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9908                                   RX_MODE_KEEP_VLAN_TAG);
9909
9910 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9911         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9912          * flag clear.
9913          */
9914         if (!tg3_flag(tp, ENABLE_ASF))
9915                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9916 #endif
9917
9918         if (dev->flags & IFF_PROMISC) {
9919                 /* Promiscuous mode. */
9920                 rx_mode |= RX_MODE_PROMISC;
9921         } else if (dev->flags & IFF_ALLMULTI) {
9922                 /* Accept all multicast. */
9923                 tg3_set_multi(tp, 1);
9924         } else if (netdev_mc_empty(dev)) {
9925                 /* Reject all multicast. */
9926                 tg3_set_multi(tp, 0);
9927         } else {
9928                 /* Accept one or more multicast(s). */
9929                 struct netdev_hw_addr *ha;
9930                 u32 mc_filter[4] = { 0, };
9931                 u32 regidx;
9932                 u32 bit;
9933                 u32 crc;
9934
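                /* Hash each address into one of 128 filter bits: the low
                 * seven bits of the inverted CRC split into a 2-bit hash
                 * register index and a 5-bit bit position.  For example,
                 * ~crc & 0x7f == 0x65 selects bit 5 of MAC_HASH_REG_3.
                 */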
9935                 netdev_for_each_mc_addr(ha, dev) {
9936                         crc = calc_crc(ha->addr, ETH_ALEN);
9937                         bit = ~crc & 0x7f;
9938                         regidx = (bit & 0x60) >> 5;
9939                         bit &= 0x1f;
9940                         mc_filter[regidx] |= (1 << bit);
9941                 }
9942
9943                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9944                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9945                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9946                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9947         }
9948
9949         if (rx_mode != tp->rx_mode) {
9950                 tp->rx_mode = rx_mode;
9951                 tw32_f(MAC_RX_MODE, rx_mode);
9952                 udelay(10);
9953         }
9954 }
9955
9956 static void tg3_set_rx_mode(struct net_device *dev)
9957 {
9958         struct tg3 *tp = netdev_priv(dev);
9959
9960         if (!netif_running(dev))
9961                 return;
9962
9963         tg3_full_lock(tp, 0);
9964         __tg3_set_rx_mode(dev);
9965         tg3_full_unlock(tp);
9966 }
9967
9968 static int tg3_get_regs_len(struct net_device *dev)
9969 {
9970         return TG3_REG_BLK_SIZE;
9971 }
9972
9973 static void tg3_get_regs(struct net_device *dev,
9974                 struct ethtool_regs *regs, void *_p)
9975 {
9976         struct tg3 *tp = netdev_priv(dev);
9977
9978         regs->version = 0;
9979
9980         memset(_p, 0, TG3_REG_BLK_SIZE);
9981
9982         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9983                 return;
9984
9985         tg3_full_lock(tp, 0);
9986
9987         tg3_dump_legacy_regs(tp, (u32 *)_p);
9988
9989         tg3_full_unlock(tp);
9990 }
9991
9992 static int tg3_get_eeprom_len(struct net_device *dev)
9993 {
9994         struct tg3 *tp = netdev_priv(dev);
9995
9996         return tp->nvram_size;
9997 }
9998
9999 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10000 {
10001         struct tg3 *tp = netdev_priv(dev);
10002         int ret;
10003         u8  *pd;
10004         u32 i, offset, len, b_offset, b_count;
10005         __be32 val;
10006
10007         if (tg3_flag(tp, NO_NVRAM))
10008                 return -EINVAL;
10009
10010         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10011                 return -EAGAIN;
10012
10013         offset = eeprom->offset;
10014         len = eeprom->len;
10015         eeprom->len = 0;
10016
10017         eeprom->magic = TG3_EEPROM_MAGIC;
10018
10019         if (offset & 3) {
10020                 /* adjustments to start on required 4 byte boundary */
10021                 b_offset = offset & 3;
10022                 b_count = 4 - b_offset;
10023                 if (b_count > len) {
10024                         /* i.e. offset=1 len=2 */
10025                         b_count = len;
10026                 }
10027                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10028                 if (ret)
10029                         return ret;
10030                 memcpy(data, ((char *)&val) + b_offset, b_count);
10031                 len -= b_count;
10032                 offset += b_count;
10033                 eeprom->len += b_count;
10034         }
10035
10036         /* read bytes up to the last 4 byte boundary */
10037         pd = &data[eeprom->len];
10038         for (i = 0; i < (len - (len & 3)); i += 4) {
10039                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10040                 if (ret) {
10041                         eeprom->len += i;
10042                         return ret;
10043                 }
10044                 memcpy(pd + i, &val, 4);
10045         }
10046         eeprom->len += i;
10047
10048         if (len & 3) {
10049                 /* read last bytes not ending on 4 byte boundary */
10050                 pd = &data[eeprom->len];
10051                 b_count = len & 3;
10052                 b_offset = offset + len - b_count;
10053                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10054                 if (ret)
10055                         return ret;
10056                 memcpy(pd, &val, b_count);
10057                 eeprom->len += b_count;
10058         }
10059         return 0;
10060 }
10061
10062 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10063
10064 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10065 {
10066         struct tg3 *tp = netdev_priv(dev);
10067         int ret;
10068         u32 offset, len, b_offset, odd_len;
10069         u8 *buf;
10070         __be32 start, end;
10071
10072         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10073                 return -EAGAIN;
10074
10075         if (tg3_flag(tp, NO_NVRAM) ||
10076             eeprom->magic != TG3_EEPROM_MAGIC)
10077                 return -EINVAL;
10078
10079         offset = eeprom->offset;
10080         len = eeprom->len;
10081
10082         if ((b_offset = (offset & 3))) {
10083                 /* adjustments to start on required 4 byte boundary */
10084                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10085                 if (ret)
10086                         return ret;
10087                 len += b_offset;
10088                 offset &= ~3;
10089                 if (len < 4)
10090                         len = 4;
10091         }
10092
10093         odd_len = 0;
10094         if (len & 3) {
10095                 /* adjustments to end on required 4 byte boundary */
10096                 odd_len = 1;
10097                 len = (len + 3) & ~3;
10098                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10099                 if (ret)
10100                         return ret;
10101         }
10102
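        /* If either end of the write is not 32-bit aligned, stage the data
         * in a bounce buffer seeded with the surrounding NVRAM words read
         * above, so that only whole words are written back.
         */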
10103         buf = data;
10104         if (b_offset || odd_len) {
10105                 buf = kmalloc(len, GFP_KERNEL);
10106                 if (!buf)
10107                         return -ENOMEM;
10108                 if (b_offset)
10109                         memcpy(buf, &start, 4);
10110                 if (odd_len)
10111                         memcpy(buf+len-4, &end, 4);
10112                 memcpy(buf + b_offset, data, eeprom->len);
10113         }
10114
10115         ret = tg3_nvram_write_block(tp, offset, len, buf);
10116
10117         if (buf != data)
10118                 kfree(buf);
10119
10120         return ret;
10121 }
10122
10123 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10124 {
10125         struct tg3 *tp = netdev_priv(dev);
10126
10127         if (tg3_flag(tp, USE_PHYLIB)) {
10128                 struct phy_device *phydev;
10129                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10130                         return -EAGAIN;
10131                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10132                 return phy_ethtool_gset(phydev, cmd);
10133         }
10134
10135         cmd->supported = (SUPPORTED_Autoneg);
10136
10137         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10138                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10139                                    SUPPORTED_1000baseT_Full);
10140
10141         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10142                 cmd->supported |= (SUPPORTED_100baseT_Half |
10143                                    SUPPORTED_100baseT_Full |
10144                                    SUPPORTED_10baseT_Half |
10145                                    SUPPORTED_10baseT_Full |
10146                                    SUPPORTED_TP);
10147                 cmd->port = PORT_TP;
10148         } else {
10149                 cmd->supported |= SUPPORTED_FIBRE;
10150                 cmd->port = PORT_FIBRE;
10151         }
10152
10153         cmd->advertising = tp->link_config.advertising;
10154         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10155                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10156                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10157                                 cmd->advertising |= ADVERTISED_Pause;
10158                         } else {
10159                                 cmd->advertising |= ADVERTISED_Pause |
10160                                                     ADVERTISED_Asym_Pause;
10161                         }
10162                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10163                         cmd->advertising |= ADVERTISED_Asym_Pause;
10164                 }
10165         }
10166         if (netif_running(dev)) {
10167                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10168                 cmd->duplex = tp->link_config.active_duplex;
10169         } else {
10170                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10171                 cmd->duplex = DUPLEX_INVALID;
10172         }
10173         cmd->phy_address = tp->phy_addr;
10174         cmd->transceiver = XCVR_INTERNAL;
10175         cmd->autoneg = tp->link_config.autoneg;
10176         cmd->maxtxpkt = 0;
10177         cmd->maxrxpkt = 0;
10178         return 0;
10179 }
10180
10181 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10182 {
10183         struct tg3 *tp = netdev_priv(dev);
10184         u32 speed = ethtool_cmd_speed(cmd);
10185
10186         if (tg3_flag(tp, USE_PHYLIB)) {
10187                 struct phy_device *phydev;
10188                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10189                         return -EAGAIN;
10190                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10191                 return phy_ethtool_sset(phydev, cmd);
10192         }
10193
10194         if (cmd->autoneg != AUTONEG_ENABLE &&
10195             cmd->autoneg != AUTONEG_DISABLE)
10196                 return -EINVAL;
10197
10198         if (cmd->autoneg == AUTONEG_DISABLE &&
10199             cmd->duplex != DUPLEX_FULL &&
10200             cmd->duplex != DUPLEX_HALF)
10201                 return -EINVAL;
10202
10203         if (cmd->autoneg == AUTONEG_ENABLE) {
10204                 u32 mask = ADVERTISED_Autoneg |
10205                            ADVERTISED_Pause |
10206                            ADVERTISED_Asym_Pause;
10207
10208                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10209                         mask |= ADVERTISED_1000baseT_Half |
10210                                 ADVERTISED_1000baseT_Full;
10211
10212                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10213                         mask |= ADVERTISED_100baseT_Half |
10214                                 ADVERTISED_100baseT_Full |
10215                                 ADVERTISED_10baseT_Half |
10216                                 ADVERTISED_10baseT_Full |
10217                                 ADVERTISED_TP;
10218                 else
10219                         mask |= ADVERTISED_FIBRE;
10220
10221                 if (cmd->advertising & ~mask)
10222                         return -EINVAL;
10223
10224                 mask &= (ADVERTISED_1000baseT_Half |
10225                          ADVERTISED_1000baseT_Full |
10226                          ADVERTISED_100baseT_Half |
10227                          ADVERTISED_100baseT_Full |
10228                          ADVERTISED_10baseT_Half |
10229                          ADVERTISED_10baseT_Full);
10230
10231                 cmd->advertising &= mask;
10232         } else {
10233                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10234                         if (speed != SPEED_1000)
10235                                 return -EINVAL;
10236
10237                         if (cmd->duplex != DUPLEX_FULL)
10238                                 return -EINVAL;
10239                 } else {
10240                         if (speed != SPEED_100 &&
10241                             speed != SPEED_10)
10242                                 return -EINVAL;
10243                 }
10244         }
10245
10246         tg3_full_lock(tp, 0);
10247
10248         tp->link_config.autoneg = cmd->autoneg;
10249         if (cmd->autoneg == AUTONEG_ENABLE) {
10250                 tp->link_config.advertising = (cmd->advertising |
10251                                               ADVERTISED_Autoneg);
10252                 tp->link_config.speed = SPEED_INVALID;
10253                 tp->link_config.duplex = DUPLEX_INVALID;
10254         } else {
10255                 tp->link_config.advertising = 0;
10256                 tp->link_config.speed = speed;
10257                 tp->link_config.duplex = cmd->duplex;
10258         }
10259
10260         tp->link_config.orig_speed = tp->link_config.speed;
10261         tp->link_config.orig_duplex = tp->link_config.duplex;
10262         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10263
10264         if (netif_running(dev))
10265                 tg3_setup_phy(tp, 1);
10266
10267         tg3_full_unlock(tp);
10268
10269         return 0;
10270 }
10271
10272 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10273 {
10274         struct tg3 *tp = netdev_priv(dev);
10275
10276         strcpy(info->driver, DRV_MODULE_NAME);
10277         strcpy(info->version, DRV_MODULE_VERSION);
10278         strcpy(info->fw_version, tp->fw_ver);
10279         strcpy(info->bus_info, pci_name(tp->pdev));
10280 }
10281
10282 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10283 {
10284         struct tg3 *tp = netdev_priv(dev);
10285
10286         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10287                 wol->supported = WAKE_MAGIC;
10288         else
10289                 wol->supported = 0;
10290         wol->wolopts = 0;
10291         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10292                 wol->wolopts = WAKE_MAGIC;
10293         memset(&wol->sopass, 0, sizeof(wol->sopass));
10294 }
10295
10296 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10297 {
10298         struct tg3 *tp = netdev_priv(dev);
10299         struct device *dp = &tp->pdev->dev;
10300
10301         if (wol->wolopts & ~WAKE_MAGIC)
10302                 return -EINVAL;
10303         if ((wol->wolopts & WAKE_MAGIC) &&
10304             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10305                 return -EINVAL;
10306
10307         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10308
10309         spin_lock_bh(&tp->lock);
10310         if (device_may_wakeup(dp))
10311                 tg3_flag_set(tp, WOL_ENABLE);
10312         else
10313                 tg3_flag_clear(tp, WOL_ENABLE);
10314         spin_unlock_bh(&tp->lock);
10315
10316         return 0;
10317 }
10318
10319 static u32 tg3_get_msglevel(struct net_device *dev)
10320 {
10321         struct tg3 *tp = netdev_priv(dev);
10322         return tp->msg_enable;
10323 }
10324
10325 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10326 {
10327         struct tg3 *tp = netdev_priv(dev);
10328         tp->msg_enable = value;
10329 }
10330
10331 static int tg3_nway_reset(struct net_device *dev)
10332 {
10333         struct tg3 *tp = netdev_priv(dev);
10334         int r;
10335
10336         if (!netif_running(dev))
10337                 return -EAGAIN;
10338
10339         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10340                 return -EINVAL;
10341
10342         if (tg3_flag(tp, USE_PHYLIB)) {
10343                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10344                         return -EAGAIN;
10345                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10346         } else {
10347                 u32 bmcr;
10348
10349                 spin_lock_bh(&tp->lock);
10350                 r = -EINVAL;
10352                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10353                     ((bmcr & BMCR_ANENABLE) ||
10354                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10355                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10356                                                    BMCR_ANENABLE);
10357                         r = 0;
10358                 }
10359                 spin_unlock_bh(&tp->lock);
10360         }
10361
10362         return r;
10363 }
10364
10365 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10366 {
10367         struct tg3 *tp = netdev_priv(dev);
10368
10369         ering->rx_max_pending = tp->rx_std_ring_mask;
10370         ering->rx_mini_max_pending = 0;
10371         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10372                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10373         else
10374                 ering->rx_jumbo_max_pending = 0;
10375
10376         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10377
10378         ering->rx_pending = tp->rx_pending;
10379         ering->rx_mini_pending = 0;
10380         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10381                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10382         else
10383                 ering->rx_jumbo_pending = 0;
10384
10385         ering->tx_pending = tp->napi[0].tx_pending;
10386 }
10387
10388 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10389 {
10390         struct tg3 *tp = netdev_priv(dev);
10391         int i, irq_sync = 0, err = 0;
10392
10393         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10394             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10395             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10396             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10397             (tg3_flag(tp, TSO_BUG) &&
10398              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10399                 return -EINVAL;
10400
10401         if (netif_running(dev)) {
10402                 tg3_phy_stop(tp);
10403                 tg3_netif_stop(tp);
10404                 irq_sync = 1;
10405         }
10406
10407         tg3_full_lock(tp, irq_sync);
10408
10409         tp->rx_pending = ering->rx_pending;
10410
10411         if (tg3_flag(tp, MAX_RXPEND_64) &&
10412             tp->rx_pending > 63)
10413                 tp->rx_pending = 63;
10414         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10415
10416         for (i = 0; i < tp->irq_max; i++)
10417                 tp->napi[i].tx_pending = ering->tx_pending;
10418
10419         if (netif_running(dev)) {
10420                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10421                 err = tg3_restart_hw(tp, 1);
10422                 if (!err)
10423                         tg3_netif_start(tp);
10424         }
10425
10426         tg3_full_unlock(tp);
10427
10428         if (irq_sync && !err)
10429                 tg3_phy_start(tp);
10430
10431         return err;
10432 }
10433
10434 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10435 {
10436         struct tg3 *tp = netdev_priv(dev);
10437
10438         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10439
10440         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10441                 epause->rx_pause = 1;
10442         else
10443                 epause->rx_pause = 0;
10444
10445         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10446                 epause->tx_pause = 1;
10447         else
10448                 epause->tx_pause = 0;
10449 }
10450
10451 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10452 {
10453         struct tg3 *tp = netdev_priv(dev);
10454         int err = 0;
10455
10456         if (tg3_flag(tp, USE_PHYLIB)) {
10457                 u32 newadv;
10458                 struct phy_device *phydev;
10459
10460                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10461
10462                 if (!(phydev->supported & SUPPORTED_Pause) ||
10463                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10464                      (epause->rx_pause != epause->tx_pause)))
10465                         return -EINVAL;
10466
10467                 tp->link_config.flowctrl = 0;
10468                 if (epause->rx_pause) {
10469                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10470
10471                         if (epause->tx_pause) {
10472                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10473                                 newadv = ADVERTISED_Pause;
10474                         } else
10475                                 newadv = ADVERTISED_Pause |
10476                                          ADVERTISED_Asym_Pause;
10477                 } else if (epause->tx_pause) {
10478                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10479                         newadv = ADVERTISED_Asym_Pause;
10480                 } else
10481                         newadv = 0;
10482
10483                 if (epause->autoneg)
10484                         tg3_flag_set(tp, PAUSE_AUTONEG);
10485                 else
10486                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10487
10488                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10489                         u32 oldadv = phydev->advertising &
10490                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10491                         if (oldadv != newadv) {
10492                                 phydev->advertising &=
10493                                         ~(ADVERTISED_Pause |
10494                                           ADVERTISED_Asym_Pause);
10495                                 phydev->advertising |= newadv;
10496                                 if (phydev->autoneg) {
10497                                         /*
10498                                          * Always renegotiate the link to
10499                                          * inform our link partner of our
10500                                          * flow control settings, even if the
10501                                          * flow control is forced.  Let
10502                                          * tg3_adjust_link() do the final
10503                                          * flow control setup.
10504                                          */
10505                                         return phy_start_aneg(phydev);
10506                                 }
10507                         }
10508
10509                         if (!epause->autoneg)
10510                                 tg3_setup_flow_control(tp, 0, 0);
10511                 } else {
10512                         tp->link_config.orig_advertising &=
10513                                         ~(ADVERTISED_Pause |
10514                                           ADVERTISED_Asym_Pause);
10515                         tp->link_config.orig_advertising |= newadv;
10516                 }
10517         } else {
10518                 int irq_sync = 0;
10519
10520                 if (netif_running(dev)) {
10521                         tg3_netif_stop(tp);
10522                         irq_sync = 1;
10523                 }
10524
10525                 tg3_full_lock(tp, irq_sync);
10526
10527                 if (epause->autoneg)
10528                         tg3_flag_set(tp, PAUSE_AUTONEG);
10529                 else
10530                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10531                 if (epause->rx_pause)
10532                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10533                 else
10534                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10535                 if (epause->tx_pause)
10536                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10537                 else
10538                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10539
10540                 if (netif_running(dev)) {
10541                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10542                         err = tg3_restart_hw(tp, 1);
10543                         if (!err)
10544                                 tg3_netif_start(tp);
10545                 }
10546
10547                 tg3_full_unlock(tp);
10548         }
10549
10550         return err;
10551 }
10552
10553 static int tg3_get_sset_count(struct net_device *dev, int sset)
10554 {
10555         switch (sset) {
10556         case ETH_SS_TEST:
10557                 return TG3_NUM_TEST;
10558         case ETH_SS_STATS:
10559                 return TG3_NUM_STATS;
10560         default:
10561                 return -EOPNOTSUPP;
10562         }
10563 }
10564
10565 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10566 {
10567         switch (stringset) {
10568         case ETH_SS_STATS:
10569                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10570                 break;
10571         case ETH_SS_TEST:
10572                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10573                 break;
10574         default:
10575                 WARN_ON(1);     /* we need a WARN() */
10576                 break;
10577         }
10578 }
10579
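/* ethtool "identify" support: blink the port LEDs by overriding
 * MAC_LED_CTRL, then restore the saved led_ctrl value when done.
 */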
10580 static int tg3_set_phys_id(struct net_device *dev,
10581                             enum ethtool_phys_id_state state)
10582 {
10583         struct tg3 *tp = netdev_priv(dev);
10584
10585         if (!netif_running(tp->dev))
10586                 return -EAGAIN;
10587
10588         switch (state) {
10589         case ETHTOOL_ID_ACTIVE:
10590                 return 1;       /* cycle on/off once per second */
10591
10592         case ETHTOOL_ID_ON:
10593                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10594                      LED_CTRL_1000MBPS_ON |
10595                      LED_CTRL_100MBPS_ON |
10596                      LED_CTRL_10MBPS_ON |
10597                      LED_CTRL_TRAFFIC_OVERRIDE |
10598                      LED_CTRL_TRAFFIC_BLINK |
10599                      LED_CTRL_TRAFFIC_LED);
10600                 break;
10601
10602         case ETHTOOL_ID_OFF:
10603                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10604                      LED_CTRL_TRAFFIC_OVERRIDE);
10605                 break;
10606
10607         case ETHTOOL_ID_INACTIVE:
10608                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10609                 break;
10610         }
10611
10612         return 0;
10613 }
10614
10615 static void tg3_get_ethtool_stats(struct net_device *dev,
10616                                    struct ethtool_stats *estats, u64 *tmp_stats)
10617 {
10618         struct tg3 *tp = netdev_priv(dev);
10619         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10620 }
10621
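/* Return a kmalloc()ed copy of the VPD data, read either from NVRAM
 * (via the extended-VPD directory entry, falling back to the default
 * VPD offset) or through the PCI VPD capability.  On success *vpdlen
 * is set to the length of the returned buffer.
 */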
10622 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10623 {
10624         int i;
10625         __be32 *buf;
10626         u32 offset = 0, len = 0;
10627         u32 magic, val;
10628
10629         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10630                 return NULL;
10631
10632         if (magic == TG3_EEPROM_MAGIC) {
10633                 for (offset = TG3_NVM_DIR_START;
10634                      offset < TG3_NVM_DIR_END;
10635                      offset += TG3_NVM_DIRENT_SIZE) {
10636                         if (tg3_nvram_read(tp, offset, &val))
10637                                 return NULL;
10638
10639                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10640                             TG3_NVM_DIRTYPE_EXTVPD)
10641                                 break;
10642                 }
10643
10644                 if (offset != TG3_NVM_DIR_END) {
10645                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10646                         if (tg3_nvram_read(tp, offset + 4, &offset))
10647                                 return NULL;
10648
10649                         offset = tg3_nvram_logical_addr(tp, offset);
10650                 }
10651         }
10652
10653         if (!offset || !len) {
10654                 offset = TG3_NVM_VPD_OFF;
10655                 len = TG3_NVM_VPD_LEN;
10656         }
10657
10658         buf = kmalloc(len, GFP_KERNEL);
10659         if (buf == NULL)
10660                 return NULL;
10661
10662         if (magic == TG3_EEPROM_MAGIC) {
10663                 for (i = 0; i < len; i += 4) {
10664                         /* The data is in little-endian format in NVRAM.
10665                          * Use the big-endian read routines to preserve
10666                          * the byte order as it exists in NVRAM.
10667                          */
10668                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10669                                 goto error;
10670                 }
10671         } else {
10672                 u8 *ptr;
10673                 ssize_t cnt;
10674                 unsigned int pos = 0;
10675
10676                 ptr = (u8 *)&buf[0];
10677                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10678                         cnt = pci_read_vpd(tp->pdev, pos,
10679                                            len - pos, ptr);
10680                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10681                                 cnt = 0;
10682                         else if (cnt < 0)
10683                                 goto error;
10684                 }
10685                 if (pos != len)
10686                         goto error;
10687         }
10688
10689         *vpdlen = len;
10690
10691         return buf;
10692
10693 error:
10694         kfree(buf);
10695         return NULL;
10696 }
10697
10698 #define NVRAM_TEST_SIZE 0x100
10699 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10700 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10701 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10702 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10703 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10704 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10705 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10706 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10707
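/* Verify the NVRAM image: pick the expected size from the magic/format
 * words, then apply the checksum scheme matching the image type
 * (selfboot firmware byte checksum, selfboot hardware parity bits, or
 * the legacy bootstrap/manufacturing CRCs), and finally check the VPD
 * checksum keyword if one is present.
 */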
10708 static int tg3_test_nvram(struct tg3 *tp)
10709 {
10710         u32 csum, magic, len;
10711         __be32 *buf;
10712         int i, j, k, err = 0, size;
10713
10714         if (tg3_flag(tp, NO_NVRAM))
10715                 return 0;
10716
10717         if (tg3_nvram_read(tp, 0, &magic) != 0)
10718                 return -EIO;
10719
10720         if (magic == TG3_EEPROM_MAGIC)
10721                 size = NVRAM_TEST_SIZE;
10722         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10723                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10724                     TG3_EEPROM_SB_FORMAT_1) {
10725                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10726                         case TG3_EEPROM_SB_REVISION_0:
10727                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10728                                 break;
10729                         case TG3_EEPROM_SB_REVISION_2:
10730                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10731                                 break;
10732                         case TG3_EEPROM_SB_REVISION_3:
10733                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10734                                 break;
10735                         case TG3_EEPROM_SB_REVISION_4:
10736                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10737                                 break;
10738                         case TG3_EEPROM_SB_REVISION_5:
10739                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10740                                 break;
10741                         case TG3_EEPROM_SB_REVISION_6:
10742                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10743                                 break;
10744                         default:
10745                                 return -EIO;
10746                         }
10747                 } else
10748                         return 0;
10749         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10750                 size = NVRAM_SELFBOOT_HW_SIZE;
10751         else
10752                 return -EIO;
10753
10754         buf = kmalloc(size, GFP_KERNEL);
10755         if (buf == NULL)
10756                 return -ENOMEM;
10757
10758         err = -EIO;
10759         for (i = 0, j = 0; i < size; i += 4, j++) {
10760                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10761                 if (err)
10762                         break;
10763         }
10764         if (i < size)
10765                 goto out;
10766
10767         /* Selfboot format */
10768         magic = be32_to_cpu(buf[0]);
10769         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10770             TG3_EEPROM_MAGIC_FW) {
10771                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10772
10773                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10774                     TG3_EEPROM_SB_REVISION_2) {
10775                         /* For rev 2, the csum doesn't include the MBA. */
10776                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10777                                 csum8 += buf8[i];
10778                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10779                                 csum8 += buf8[i];
10780                 } else {
10781                         for (i = 0; i < size; i++)
10782                                 csum8 += buf8[i];
10783                 }
10784
10785                 if (csum8 == 0) {
10786                         err = 0;
10787                         goto out;
10788                 }
10789
10790                 err = -EIO;
10791                 goto out;
10792         }
10793
10794         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10795             TG3_EEPROM_MAGIC_HW) {
10796                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10797                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10798                 u8 *buf8 = (u8 *) buf;
10799
10800                 /* Separate the parity bits and the data bytes.  */
10801                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10802                         if ((i == 0) || (i == 8)) {
10803                                 int l;
10804                                 u8 msk;
10805
10806                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10807                                         parity[k++] = buf8[i] & msk;
10808                                 i++;
10809                         } else if (i == 16) {
10810                                 int l;
10811                                 u8 msk;
10812
10813                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10814                                         parity[k++] = buf8[i] & msk;
10815                                 i++;
10816
10817                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10818                                         parity[k++] = buf8[i] & msk;
10819                                 i++;
10820                         }
10821                         data[j++] = buf8[i];
10822                 }
10823
10824                 err = -EIO;
10825                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10826                         u8 hw8 = hweight8(data[i]);
10827
10828                         if ((hw8 & 0x1) && parity[i])
10829                                 goto out;
10830                         else if (!(hw8 & 0x1) && !parity[i])
10831                                 goto out;
10832                 }
10833                 err = 0;
10834                 goto out;
10835         }
10836
10837         err = -EIO;
10838
10839         /* Bootstrap checksum at offset 0x10 */
10840         csum = calc_crc((unsigned char *) buf, 0x10);
10841         if (csum != le32_to_cpu(buf[0x10/4]))
10842                 goto out;
10843
10844         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10845         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10846         if (csum != le32_to_cpu(buf[0xfc/4]))
10847                 goto out;
10848
10849         kfree(buf);
10850
10851         buf = tg3_vpd_readblock(tp, &len);
10852         if (!buf)
10853                 return -ENOMEM;
10854
10855         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
10856         if (i > 0) {
10857                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10858                 if (j < 0)
10859                         goto out;
10860
10861                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
10862                         goto out;
10863
10864                 i += PCI_VPD_LRDT_TAG_SIZE;
10865                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10866                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10867                 if (j > 0) {
10868                         u8 csum8 = 0;
10869
10870                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10871
10872                         for (i = 0; i <= j; i++)
10873                                 csum8 += ((u8 *)buf)[i];
10874
10875                         if (csum8)
10876                                 goto out;
10877                 }
10878         }
10879
10880         err = 0;
10881
10882 out:
10883         kfree(buf);
10884         return err;
10885 }
10886
10887 #define TG3_SERDES_TIMEOUT_SEC  2
10888 #define TG3_COPPER_TIMEOUT_SEC  6
10889
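/* Poll for carrier for up to the PHY-type-specific timeout above. */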
10890 static int tg3_test_link(struct tg3 *tp)
10891 {
10892         int i, max;
10893
10894         if (!netif_running(tp->dev))
10895                 return -ENODEV;
10896
10897         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10898                 max = TG3_SERDES_TIMEOUT_SEC;
10899         else
10900                 max = TG3_COPPER_TIMEOUT_SEC;
10901
10902         for (i = 0; i < max; i++) {
10903                 if (netif_carrier_ok(tp->dev))
10904                         return 0;
10905
10906                 if (msleep_interruptible(1000))
10907                         break;
10908         }
10909
10910         return -EIO;
10911 }
10912
10913 /* Only test the commonly used registers */
10914 static int tg3_test_registers(struct tg3 *tp)
10915 {
10916         int i, is_5705, is_5750;
10917         u32 offset, read_mask, write_mask, val, save_val, read_val;
10918         static struct {
10919                 u16 offset;
10920                 u16 flags;
10921 #define TG3_FL_5705     0x1
10922 #define TG3_FL_NOT_5705 0x2
10923 #define TG3_FL_NOT_5788 0x4
10924 #define TG3_FL_NOT_5750 0x8
10925                 u32 read_mask;
10926                 u32 write_mask;
10927         } reg_tbl[] = {
10928                 /* MAC Control Registers */
10929                 { MAC_MODE, TG3_FL_NOT_5705,
10930                         0x00000000, 0x00ef6f8c },
10931                 { MAC_MODE, TG3_FL_5705,
10932                         0x00000000, 0x01ef6b8c },
10933                 { MAC_STATUS, TG3_FL_NOT_5705,
10934                         0x03800107, 0x00000000 },
10935                 { MAC_STATUS, TG3_FL_5705,
10936                         0x03800100, 0x00000000 },
10937                 { MAC_ADDR_0_HIGH, 0x0000,
10938                         0x00000000, 0x0000ffff },
10939                 { MAC_ADDR_0_LOW, 0x0000,
10940                         0x00000000, 0xffffffff },
10941                 { MAC_RX_MTU_SIZE, 0x0000,
10942                         0x00000000, 0x0000ffff },
10943                 { MAC_TX_MODE, 0x0000,
10944                         0x00000000, 0x00000070 },
10945                 { MAC_TX_LENGTHS, 0x0000,
10946                         0x00000000, 0x00003fff },
10947                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10948                         0x00000000, 0x000007fc },
10949                 { MAC_RX_MODE, TG3_FL_5705,
10950                         0x00000000, 0x000007dc },
10951                 { MAC_HASH_REG_0, 0x0000,
10952                         0x00000000, 0xffffffff },
10953                 { MAC_HASH_REG_1, 0x0000,
10954                         0x00000000, 0xffffffff },
10955                 { MAC_HASH_REG_2, 0x0000,
10956                         0x00000000, 0xffffffff },
10957                 { MAC_HASH_REG_3, 0x0000,
10958                         0x00000000, 0xffffffff },
10959
10960                 /* Receive Data and Receive BD Initiator Control Registers. */
10961                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10962                         0x00000000, 0xffffffff },
10963                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10964                         0x00000000, 0xffffffff },
10965                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10966                         0x00000000, 0x00000003 },
10967                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10968                         0x00000000, 0xffffffff },
10969                 { RCVDBDI_STD_BD+0, 0x0000,
10970                         0x00000000, 0xffffffff },
10971                 { RCVDBDI_STD_BD+4, 0x0000,
10972                         0x00000000, 0xffffffff },
10973                 { RCVDBDI_STD_BD+8, 0x0000,
10974                         0x00000000, 0xffff0002 },
10975                 { RCVDBDI_STD_BD+0xc, 0x0000,
10976                         0x00000000, 0xffffffff },
10977
10978                 /* Receive BD Initiator Control Registers. */
10979                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10980                         0x00000000, 0xffffffff },
10981                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10982                         0x00000000, 0x000003ff },
10983                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10984                         0x00000000, 0xffffffff },
10985
10986                 /* Host Coalescing Control Registers. */
10987                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10988                         0x00000000, 0x00000004 },
10989                 { HOSTCC_MODE, TG3_FL_5705,
10990                         0x00000000, 0x000000f6 },
10991                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10992                         0x00000000, 0xffffffff },
10993                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10994                         0x00000000, 0x000003ff },
10995                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10996                         0x00000000, 0xffffffff },
10997                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10998                         0x00000000, 0x000003ff },
10999                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11000                         0x00000000, 0xffffffff },
11001                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11002                         0x00000000, 0x000000ff },
11003                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11004                         0x00000000, 0xffffffff },
11005                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11006                         0x00000000, 0x000000ff },
11007                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11008                         0x00000000, 0xffffffff },
11009                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11010                         0x00000000, 0xffffffff },
11011                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11012                         0x00000000, 0xffffffff },
11013                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11014                         0x00000000, 0x000000ff },
11015                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11016                         0x00000000, 0xffffffff },
11017                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11018                         0x00000000, 0x000000ff },
11019                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11020                         0x00000000, 0xffffffff },
11021                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11022                         0x00000000, 0xffffffff },
11023                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11024                         0x00000000, 0xffffffff },
11025                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11026                         0x00000000, 0xffffffff },
11027                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11028                         0x00000000, 0xffffffff },
11029                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11030                         0xffffffff, 0x00000000 },
11031                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11032                         0xffffffff, 0x00000000 },
11033
11034                 /* Buffer Manager Control Registers. */
11035                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11036                         0x00000000, 0x007fff80 },
11037                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11038                         0x00000000, 0x007fffff },
11039                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11040                         0x00000000, 0x0000003f },
11041                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11042                         0x00000000, 0x000001ff },
11043                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11044                         0x00000000, 0x000001ff },
11045                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11046                         0xffffffff, 0x00000000 },
11047                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11048                         0xffffffff, 0x00000000 },
11049
11050                 /* Mailbox Registers */
11051                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11052                         0x00000000, 0x000001ff },
11053                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11054                         0x00000000, 0x000001ff },
11055                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11056                         0x00000000, 0x000007ff },
11057                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11058                         0x00000000, 0x000001ff },
11059
11060                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11061         };
11062
11063         is_5705 = is_5750 = 0;
11064         if (tg3_flag(tp, 5705_PLUS)) {
11065                 is_5705 = 1;
11066                 if (tg3_flag(tp, 5750_PLUS))
11067                         is_5750 = 1;
11068         }
11069
11070         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11071                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11072                         continue;
11073
11074                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11075                         continue;
11076
11077                 if (tg3_flag(tp, IS_5788) &&
11078                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11079                         continue;
11080
11081                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11082                         continue;
11083
11084                 offset = (u32) reg_tbl[i].offset;
11085                 read_mask = reg_tbl[i].read_mask;
11086                 write_mask = reg_tbl[i].write_mask;
11087
11088                 /* Save the original register content */
11089                 save_val = tr32(offset);
11090
11091                 /* Determine the read-only value. */
11092                 read_val = save_val & read_mask;
11093
11094                 /* Write zero to the register, then make sure the read-only bits
11095                  * are not changed and the read/write bits are all zeros.
11096                  */
11097                 tw32(offset, 0);
11098
11099                 val = tr32(offset);
11100
11101                 /* Test the read-only and read/write bits. */
11102                 if (((val & read_mask) != read_val) || (val & write_mask))
11103                         goto out;
11104
11105                 /* Write ones to all the bits defined by RdMask and WrMask, then
11106                  * make sure the read-only bits are not changed and the
11107                  * read/write bits are all ones.
11108                  */
11109                 tw32(offset, read_mask | write_mask);
11110
11111                 val = tr32(offset);
11112
11113                 /* Test the read-only bits. */
11114                 if ((val & read_mask) != read_val)
11115                         goto out;
11116
11117                 /* Test the read/write bits. */
11118                 if ((val & write_mask) != write_mask)
11119                         goto out;
11120
11121                 tw32(offset, save_val);
11122         }
11123
11124         return 0;
11125
11126 out:
11127         if (netif_msg_hw(tp))
11128                 netdev_err(tp->dev,
11129                            "Register test failed at offset %x\n", offset);
11130         tw32(offset, save_val);
11131         return -EIO;
11132 }
11133
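/* Write each test pattern across [offset, offset + len) in 4-byte
 * steps and read it back; any mismatch fails the memory test.
 */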
11134 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11135 {
11136         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11137         int i;
11138         u32 j;
11139
11140         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11141                 for (j = 0; j < len; j += 4) {
11142                         u32 val;
11143
11144                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11145                         tg3_read_mem(tp, offset + j, &val);
11146                         if (val != test_pattern[i])
11147                                 return -EIO;
11148                 }
11149         }
11150         return 0;
11151 }
11152
11153 static int tg3_test_memory(struct tg3 *tp)
11154 {
11155         static struct mem_entry {
11156                 u32 offset;
11157                 u32 len;
11158         } mem_tbl_570x[] = {
11159                 { 0x00000000, 0x00b50},
11160                 { 0x00002000, 0x1c000},
11161                 { 0xffffffff, 0x00000}
11162         }, mem_tbl_5705[] = {
11163                 { 0x00000100, 0x0000c},
11164                 { 0x00000200, 0x00008},
11165                 { 0x00004000, 0x00800},
11166                 { 0x00006000, 0x01000},
11167                 { 0x00008000, 0x02000},
11168                 { 0x00010000, 0x0e000},
11169                 { 0xffffffff, 0x00000}
11170         }, mem_tbl_5755[] = {
11171                 { 0x00000200, 0x00008},
11172                 { 0x00004000, 0x00800},
11173                 { 0x00006000, 0x00800},
11174                 { 0x00008000, 0x02000},
11175                 { 0x00010000, 0x0c000},
11176                 { 0xffffffff, 0x00000}
11177         }, mem_tbl_5906[] = {
11178                 { 0x00000200, 0x00008},
11179                 { 0x00004000, 0x00400},
11180                 { 0x00006000, 0x00400},
11181                 { 0x00008000, 0x01000},
11182                 { 0x00010000, 0x01000},
11183                 { 0xffffffff, 0x00000}
11184         }, mem_tbl_5717[] = {
11185                 { 0x00000200, 0x00008},
11186                 { 0x00010000, 0x0a000},
11187                 { 0x00020000, 0x13c00},
11188                 { 0xffffffff, 0x00000}
11189         }, mem_tbl_57765[] = {
11190                 { 0x00000200, 0x00008},
11191                 { 0x00004000, 0x00800},
11192                 { 0x00006000, 0x09800},
11193                 { 0x00010000, 0x0a000},
11194                 { 0xffffffff, 0x00000}
11195         };
11196         struct mem_entry *mem_tbl;
11197         int err = 0;
11198         int i;
11199
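        /* Pick the internal-memory map matching this ASIC; each table
         * ends with an 0xffffffff sentinel offset.
         */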
11200         if (tg3_flag(tp, 5717_PLUS))
11201                 mem_tbl = mem_tbl_5717;
11202         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11203                 mem_tbl = mem_tbl_57765;
11204         else if (tg3_flag(tp, 5755_PLUS))
11205                 mem_tbl = mem_tbl_5755;
11206         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11207                 mem_tbl = mem_tbl_5906;
11208         else if (tg3_flag(tp, 5705_PLUS))
11209                 mem_tbl = mem_tbl_5705;
11210         else
11211                 mem_tbl = mem_tbl_570x;
11212
11213         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11214                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11215                 if (err)
11216                         break;
11217         }
11218
11219         return err;
11220 }
11221
11222 #define TG3_MAC_LOOPBACK        0
11223 #define TG3_PHY_LOOPBACK        1
11224 #define TG3_TSO_LOOPBACK        2
11225
11226 #define TG3_TSO_MSS             500
11227
11228 #define TG3_TSO_IP_HDR_LEN      20
11229 #define TG3_TSO_TCP_HDR_LEN     20
11230 #define TG3_TSO_TCP_OPT_LEN     12
11231
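/* Canned header for the TSO loopback test: a 2-byte Ethertype (IPv4),
 * a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, proto TCP), and a
 * 32-byte TCP header carrying 12 bytes of options (two NOPs plus a
 * timestamp option).
 */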
11232 static const u8 tg3_tso_header[] = {
11233 0x08, 0x00,
11234 0x45, 0x00, 0x00, 0x00,
11235 0x00, 0x00, 0x40, 0x00,
11236 0x40, 0x06, 0x00, 0x00,
11237 0x0a, 0x00, 0x00, 0x01,
11238 0x0a, 0x00, 0x00, 0x02,
11239 0x0d, 0x00, 0xe0, 0x00,
11240 0x00, 0x00, 0x01, 0x00,
11241 0x00, 0x00, 0x02, 0x00,
11242 0x80, 0x10, 0x10, 0x00,
11243 0x14, 0x09, 0x00, 0x00,
11244 0x01, 0x01, 0x08, 0x0a,
11245 0x11, 0x11, 0x11, 0x11,
11246 0x11, 0x11, 0x11, 0x11,
11247 };
11248
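/* Push one frame (or one TSO burst) through the selected loopback
 * path and verify it arrives intact on the receive return ring.
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */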
11249 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11250 {
11251         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11252         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11253         u32 budget;
11254         struct sk_buff *skb, *rx_skb;
11255         u8 *tx_data;
11256         dma_addr_t map;
11257         int num_pkts, tx_len, rx_len, i, err;
11258         struct tg3_rx_buffer_desc *desc;
11259         struct tg3_napi *tnapi, *rnapi;
11260         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11261
11262         tnapi = &tp->napi[0];
11263         rnapi = &tp->napi[0];
11264         if (tp->irq_cnt > 1) {
11265                 if (tg3_flag(tp, ENABLE_RSS))
11266                         rnapi = &tp->napi[1];
11267                 if (tg3_flag(tp, ENABLE_TSS))
11268                         tnapi = &tp->napi[1];
11269         }
11270         coal_now = tnapi->coal_now | rnapi->coal_now;
11271
11272         if (loopback_mode == TG3_MAC_LOOPBACK) {
11273                 /* HW erratum - MAC loopback fails in some cases on 5780.
11274                  * Normal traffic and PHY loopback are not affected by
11275                  * this erratum.  Also, the MAC loopback test is deprecated
11276                  * for all newer ASIC revisions.
11277                  */
11278                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11279                     tg3_flag(tp, CPMU_PRESENT))
11280                         return 0;
11281
11282                 mac_mode = tp->mac_mode &
11283                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11284                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11285                 if (!tg3_flag(tp, 5705_PLUS))
11286                         mac_mode |= MAC_MODE_LINK_POLARITY;
11287                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11288                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11289                 else
11290                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11291                 tw32(MAC_MODE, mac_mode);
11292         } else {
11293                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11294                         tg3_phy_fet_toggle_apd(tp, false);
11295                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11296                 } else
11297                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11298
11299                 tg3_phy_toggle_automdix(tp, 0);
11300
11301                 tg3_writephy(tp, MII_BMCR, val);
11302                 udelay(40);
11303
11304                 mac_mode = tp->mac_mode &
11305                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11306                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11307                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11308                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11309                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11310                         /* The write needs to be flushed for the AC131 */
11311                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11312                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11313                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11314                 } else
11315                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11316
11317                 /* Reset to avoid intermittently losing the first rx packet */
11318                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11319                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11320                         udelay(10);
11321                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11322                 }
11323                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11324                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11325                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11326                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11327                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11328                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11329                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11330                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11331                 }
11332                 tw32(MAC_MODE, mac_mode);
11333
11334                 /* Wait up to 100 ms for link */
11335                 for (i = 0; i < 100; i++) {
11336                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11337                                 break;
11338                         mdelay(1);
11339                 }
11340         }
11341
11342         err = -EIO;
11343
11344         tx_len = pktsz;
11345         skb = netdev_alloc_skb(tp->dev, tx_len);
11346         if (!skb)
11347                 return -ENOMEM;
11348
11349         tx_data = skb_put(skb, tx_len);
11350         memcpy(tx_data, tp->dev->dev_addr, 6);
11351         memset(tx_data + 6, 0x0, 8);
11352
11353         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11354
11355         if (loopback_mode == TG3_TSO_LOOPBACK) {
11356                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11357
11358                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11359                               TG3_TSO_TCP_OPT_LEN;
11360
11361                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11362                        sizeof(tg3_tso_header));
11363                 mss = TG3_TSO_MSS;
11364
11365                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11366                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11367
11368                 /* Set the total length field in the IP header */
11369                 iph->tot_len = htons((u16)(mss + hdr_len));
11370
11371                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11372                               TXD_FLAG_CPU_POST_DMA);
11373
11374                 if (tg3_flag(tp, HW_TSO_1) ||
11375                     tg3_flag(tp, HW_TSO_2) ||
11376                     tg3_flag(tp, HW_TSO_3)) {
11377                         struct tcphdr *th;
11378                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11379                         th = (struct tcphdr *)&tx_data[val];
11380                         th->check = 0;
11381                 } else
11382                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11383
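                /* Each hardware TSO generation encodes the header
                 * length differently: HW_TSO_3 spreads it across the
                 * mss field and base_flags, HW_TSO_2 packs it into the
                 * upper mss bits, and older parts encode only the TCP
                 * option length.
                 */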
11384                 if (tg3_flag(tp, HW_TSO_3)) {
11385                         mss |= (hdr_len & 0xc) << 12;
11386                         if (hdr_len & 0x10)
11387                                 base_flags |= 0x00000010;
11388                         base_flags |= (hdr_len & 0x3e0) << 5;
11389                 } else if (tg3_flag(tp, HW_TSO_2))
11390                         mss |= hdr_len << 9;
11391                 else if (tg3_flag(tp, HW_TSO_1) ||
11392                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11393                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11394                 } else {
11395                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11396                 }
11397
11398                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11399         } else {
11400                 num_pkts = 1;
11401                 data_off = ETH_HLEN;
11402         }
11403
11404         for (i = data_off; i < tx_len; i++)
11405                 tx_data[i] = (u8) (i & 0xff);
11406
11407         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11408         if (pci_dma_mapping_error(tp->pdev, map)) {
11409                 dev_kfree_skb(skb);
11410                 return -EIO;
11411         }
11412
11413         val = tnapi->tx_prod;
11414         tnapi->tx_buffers[val].skb = skb;
11415         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11416
11417         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11418                rnapi->coal_now);
11419
11420         udelay(10);
11421
11422         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11423
11424         budget = tg3_tx_avail(tnapi);
11425         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11426                             base_flags | TXD_FLAG_END, mss, 0)) {
11427                 tnapi->tx_buffers[val].skb = NULL;
11428                 dev_kfree_skb(skb);
11429                 return -EIO;
11430         }
11431
11432         tnapi->tx_prod++;
11433
11434         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11435         tr32_mailbox(tnapi->prodmbox);
11436
11437         udelay(10);
11438
11439         /* Poll up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices. */
11440         for (i = 0; i < 35; i++) {
11441                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11442                        coal_now);
11443
11444                 udelay(10);
11445
11446                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11447                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11448                 if ((tx_idx == tnapi->tx_prod) &&
11449                     (rx_idx == (rx_start_idx + num_pkts)))
11450                         break;
11451         }
11452
11453         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11454         dev_kfree_skb(skb);
11455
11456         if (tx_idx != tnapi->tx_prod)
11457                 goto out;
11458
11459         if (rx_idx != rx_start_idx + num_pkts)
11460                 goto out;
11461
11462         val = data_off;
11463         while (rx_idx != rx_start_idx) {
11464                 desc = &rnapi->rx_rcb[rx_start_idx++];
11465                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11466                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11467
11468                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11469                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11470                         goto out;
11471
11472                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11473                          - ETH_FCS_LEN;
11474
11475                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11476                         if (rx_len != tx_len)
11477                                 goto out;
11478
11479                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11480                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11481                                         goto out;
11482                         } else {
11483                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11484                                         goto out;
11485                         }
11486                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11487                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11488                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11489                         goto out;
11490                 }
11491
11492                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11493                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11494                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11495                                              mapping);
11496                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11497                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11498                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11499                                              mapping);
11500                 } else
11501                         goto out;
11502
11503                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11504                                             PCI_DMA_FROMDEVICE);
11505
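                /* Verify every payload byte against the transmitted
                 * pattern; val carries across packets so a TSO burst
                 * is checked as one continuous stream.
                 */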
11506                 for (i = data_off; i < rx_len; i++, val++) {
11507                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11508                                 goto out;
11509                 }
11510         }
11511
11512         err = 0;
11513
11514         /* tg3_free_rings will unmap and free the rx_skb */
11515 out:
11516         return err;
11517 }
11518
11519 #define TG3_STD_LOOPBACK_FAILED         1
11520 #define TG3_JMB_LOOPBACK_FAILED         2
11521 #define TG3_TSO_LOOPBACK_FAILED         4
11522
11523 #define TG3_MAC_LOOPBACK_SHIFT          0
11524 #define TG3_PHY_LOOPBACK_SHIFT          4
11525 #define TG3_LOOPBACK_FAILED             0x00000077
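/* 0x77 = STD, JMB and TSO failure bits set at both the MAC and PHY
 * loopback shifts.
 */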
11526
11527 static int tg3_test_loopback(struct tg3 *tp)
11528 {
11529         int err = 0;
11530         u32 eee_cap, cpmuctrl = 0;
11531
11532         if (!netif_running(tp->dev))
11533                 return TG3_LOOPBACK_FAILED;
11534
11535         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11536         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11537
11538         err = tg3_reset_hw(tp, 1);
11539         if (err) {
11540                 err = TG3_LOOPBACK_FAILED;
11541                 goto done;
11542         }
11543
11544         if (tg3_flag(tp, ENABLE_RSS)) {
11545                 int i;
11546
11547                 /* Reroute all rx packets to the 1st queue */
11548                 for (i = MAC_RSS_INDIR_TBL_0;
11549                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11550                         tw32(i, 0x0);
11551         }
11552
11553         /* Turn off gphy autopowerdown. */
11554         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11555                 tg3_phy_toggle_apd(tp, false);
11556
11557         if (tg3_flag(tp, CPMU_PRESENT)) {
11558                 int i;
11559                 u32 status;
11560
11561                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11562
11563                 /* Wait for up to 40 microseconds to acquire lock. */
11564                 for (i = 0; i < 4; i++) {
11565                         status = tr32(TG3_CPMU_MUTEX_GNT);
11566                         if (status == CPMU_MUTEX_GNT_DRIVER)
11567                                 break;
11568                         udelay(10);
11569                 }
11570
11571                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11572                         err = TG3_LOOPBACK_FAILED;
11573                         goto done;
11574                 }
11575
11576                 /* Turn off link-based power management. */
11577                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11578                 tw32(TG3_CPMU_CTRL,
11579                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11580                                   CPMU_CTRL_LINK_AWARE_MODE));
11581         }
11582
11583         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11584                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11585
11586         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11587             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11588                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11589
11590         if (tg3_flag(tp, CPMU_PRESENT)) {
11591                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11592
11593                 /* Release the mutex */
11594                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11595         }
11596
11597         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11598             !tg3_flag(tp, USE_PHYLIB)) {
11599                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11600                         err |= TG3_STD_LOOPBACK_FAILED <<
11601                                TG3_PHY_LOOPBACK_SHIFT;
11602                 if (tg3_flag(tp, TSO_CAPABLE) &&
11603                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11604                         err |= TG3_TSO_LOOPBACK_FAILED <<
11605                                TG3_PHY_LOOPBACK_SHIFT;
11606                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11607                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11608                         err |= TG3_JMB_LOOPBACK_FAILED <<
11609                                TG3_PHY_LOOPBACK_SHIFT;
11610         }
11611
11612         /* Re-enable gphy autopowerdown. */
11613         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11614                 tg3_phy_toggle_apd(tp, true);
11615
11616 done:
11617         tp->phy_flags |= eee_cap;
11618
11619         return err;
11620 }
11621
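/* ethtool self-test entry point; userspace typically reaches this via
 * something like "ethtool -t eth0 offline" (interface name is
 * illustrative).
 */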
11622 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11623                           u64 *data)
11624 {
11625         struct tg3 *tp = netdev_priv(dev);
11626
11627         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11628             tg3_power_up(tp)) {
11629                 etest->flags |= ETH_TEST_FL_FAILED;
11630                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11631                 return;
11632         }
11633
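        /* Result slots: data[0] nvram, data[1] link, data[2] registers,
         * data[3] memory, data[4] loopback, data[5] interrupt.
         */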
11634         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11635
11636         if (tg3_test_nvram(tp) != 0) {
11637                 etest->flags |= ETH_TEST_FL_FAILED;
11638                 data[0] = 1;
11639         }
11640         if (tg3_test_link(tp) != 0) {
11641                 etest->flags |= ETH_TEST_FL_FAILED;
11642                 data[1] = 1;
11643         }
11644         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11645                 int err, err2 = 0, irq_sync = 0;
11646
11647                 if (netif_running(dev)) {
11648                         tg3_phy_stop(tp);
11649                         tg3_netif_stop(tp);
11650                         irq_sync = 1;
11651                 }
11652
11653                 tg3_full_lock(tp, irq_sync);
11654
11655                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11656                 err = tg3_nvram_lock(tp);
11657                 tg3_halt_cpu(tp, RX_CPU_BASE);
11658                 if (!tg3_flag(tp, 5705_PLUS))
11659                         tg3_halt_cpu(tp, TX_CPU_BASE);
11660                 if (!err)
11661                         tg3_nvram_unlock(tp);
11662
11663                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11664                         tg3_phy_reset(tp);
11665
11666                 if (tg3_test_registers(tp) != 0) {
11667                         etest->flags |= ETH_TEST_FL_FAILED;
11668                         data[2] = 1;
11669                 }
11670                 if (tg3_test_memory(tp) != 0) {
11671                         etest->flags |= ETH_TEST_FL_FAILED;
11672                         data[3] = 1;
11673                 }
11674                 if ((data[4] = tg3_test_loopback(tp)) != 0)
11675                         etest->flags |= ETH_TEST_FL_FAILED;
11676
11677                 tg3_full_unlock(tp);
11678
11679                 if (tg3_test_interrupt(tp) != 0) {
11680                         etest->flags |= ETH_TEST_FL_FAILED;
11681                         data[5] = 1;
11682                 }
11683
11684                 tg3_full_lock(tp, 0);
11685
11686                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11687                 if (netif_running(dev)) {
11688                         tg3_flag_set(tp, INIT_COMPLETE);
11689                         err2 = tg3_restart_hw(tp, 1);
11690                         if (!err2)
11691                                 tg3_netif_start(tp);
11692                 }
11693
11694                 tg3_full_unlock(tp);
11695
11696                 if (irq_sync && !err2)
11697                         tg3_phy_start(tp);
11698         }
11699         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11700                 tg3_power_down(tp);
11701
11702 }
11703
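/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG); when
 * phylib is in use the request is forwarded to the attached PHY
 * device.
 */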
11704 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11705 {
11706         struct mii_ioctl_data *data = if_mii(ifr);
11707         struct tg3 *tp = netdev_priv(dev);
11708         int err;
11709
11710         if (tg3_flag(tp, USE_PHYLIB)) {
11711                 struct phy_device *phydev;
11712                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11713                         return -EAGAIN;
11714                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11715                 return phy_mii_ioctl(phydev, ifr, cmd);
11716         }
11717
11718         switch (cmd) {
11719         case SIOCGMIIPHY:
11720                 data->phy_id = tp->phy_addr;
11721
11722                 /* fallthru */
11723         case SIOCGMIIREG: {
11724                 u32 mii_regval;
11725
11726                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11727                         break;                  /* We have no PHY */
11728
11729                 if (!netif_running(dev))
11730                         return -EAGAIN;
11731
11732                 spin_lock_bh(&tp->lock);
11733                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11734                 spin_unlock_bh(&tp->lock);
11735
11736                 data->val_out = mii_regval;
11737
11738                 return err;
11739         }
11740
11741         case SIOCSMIIREG:
11742                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11743                         break;                  /* We have no PHY */
11744
11745                 if (!netif_running(dev))
11746                         return -EAGAIN;
11747
11748                 spin_lock_bh(&tp->lock);
11749                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11750                 spin_unlock_bh(&tp->lock);
11751
11752                 return err;
11753
11754         default:
11755                 /* do nothing */
11756                 break;
11757         }
11758         return -EOPNOTSUPP;
11759 }
11760
11761 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11762 {
11763         struct tg3 *tp = netdev_priv(dev);
11764
11765         memcpy(ec, &tp->coal, sizeof(*ec));
11766         return 0;
11767 }
11768
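/* Validate and apply interrupt coalescing parameters; userspace
 * typically reaches this via something like
 * "ethtool -C eth0 rx-usecs 20" (values illustrative).
 */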
11769 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11770 {
11771         struct tg3 *tp = netdev_priv(dev);
11772         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11773         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11774
11775         if (!tg3_flag(tp, 5705_PLUS)) {
11776                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11777                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11778                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11779                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11780         }
11781
11782         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11783             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11784             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11785             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11786             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11787             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11788             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11789             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11790             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11791             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11792                 return -EINVAL;
11793
11794         /* No rx interrupts will be generated if both are zero */
11795         if ((ec->rx_coalesce_usecs == 0) &&
11796             (ec->rx_max_coalesced_frames == 0))
11797                 return -EINVAL;
11798
11799         /* No tx interrupts will be generated if both are zero */
11800         if ((ec->tx_coalesce_usecs == 0) &&
11801             (ec->tx_max_coalesced_frames == 0))
11802                 return -EINVAL;
11803
11804         /* Only copy relevant parameters, ignore all others. */
11805         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11806         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11807         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11808         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11809         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11810         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11811         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11812         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11813         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11814
11815         if (netif_running(dev)) {
11816                 tg3_full_lock(tp, 0);
11817                 __tg3_set_coalesce(tp, &tp->coal);
11818                 tg3_full_unlock(tp);
11819         }
11820         return 0;
11821 }
11822
11823 static const struct ethtool_ops tg3_ethtool_ops = {
11824         .get_settings           = tg3_get_settings,
11825         .set_settings           = tg3_set_settings,
11826         .get_drvinfo            = tg3_get_drvinfo,
11827         .get_regs_len           = tg3_get_regs_len,
11828         .get_regs               = tg3_get_regs,
11829         .get_wol                = tg3_get_wol,
11830         .set_wol                = tg3_set_wol,
11831         .get_msglevel           = tg3_get_msglevel,
11832         .set_msglevel           = tg3_set_msglevel,
11833         .nway_reset             = tg3_nway_reset,
11834         .get_link               = ethtool_op_get_link,
11835         .get_eeprom_len         = tg3_get_eeprom_len,
11836         .get_eeprom             = tg3_get_eeprom,
11837         .set_eeprom             = tg3_set_eeprom,
11838         .get_ringparam          = tg3_get_ringparam,
11839         .set_ringparam          = tg3_set_ringparam,
11840         .get_pauseparam         = tg3_get_pauseparam,
11841         .set_pauseparam         = tg3_set_pauseparam,
11842         .self_test              = tg3_self_test,
11843         .get_strings            = tg3_get_strings,
11844         .set_phys_id            = tg3_set_phys_id,
11845         .get_ethtool_stats      = tg3_get_ethtool_stats,
11846         .get_coalesce           = tg3_get_coalesce,
11847         .set_coalesce           = tg3_set_coalesce,
11848         .get_sset_count         = tg3_get_sset_count,
11849 };
11850
11851 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11852 {
11853         u32 cursize, val, magic;
11854
11855         tp->nvram_size = EEPROM_CHIP_SIZE;
11856
11857         if (tg3_nvram_read(tp, 0, &magic) != 0)
11858                 return;
11859
11860         if ((magic != TG3_EEPROM_MAGIC) &&
11861             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11862             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11863                 return;
11864
11865         /*
11866          * Size the chip by reading offsets at increasing powers of two.
11867          * When we encounter our validation signature, we know the addressing
11868          * has wrapped around, and thus have our chip size.
11869          */
11870         cursize = 0x10;
11871
11872         while (cursize < tp->nvram_size) {
11873                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11874                         return;
11875
11876                 if (val == magic)
11877                         break;
11878
11879                 cursize <<= 1;
11880         }
11881
11882         tp->nvram_size = cursize;
11883 }
11884
11885 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11886 {
11887         u32 val;
11888
11889         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11890                 return;
11891
11892         /* Selfboot format */
11893         if (val != TG3_EEPROM_MAGIC) {
11894                 tg3_get_eeprom_size(tp);
11895                 return;
11896         }
11897
11898         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11899                 if (val != 0) {
11900                         /* This is confusing.  We want to operate on the
11901                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11902                          * call will read from NVRAM and byteswap the data
11903                          * according to the byteswapping settings for all
11904                          * other register accesses.  This ensures the data we
11905                          * want will always reside in the lower 16-bits.
11906                          * However, the data in NVRAM is in LE format, which
11907                          * means the data from the NVRAM read will always be
11908                          * opposite the endianness of the CPU.  The 16-bit
11909                          * byteswap then brings the data to CPU endianness.
11910                          */
11911                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11912                         return;
11913                 }
11914         }
11915         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11916 }
11917
11918 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11919 {
11920         u32 nvcfg1;
11921
11922         nvcfg1 = tr32(NVRAM_CFG1);
11923         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11924                 tg3_flag_set(tp, FLASH);
11925         } else {
11926                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11927                 tw32(NVRAM_CFG1, nvcfg1);
11928         }
11929
11930         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11931             tg3_flag(tp, 5780_CLASS)) {
11932                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11933                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11934                         tp->nvram_jedecnum = JEDEC_ATMEL;
11935                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11936                         tg3_flag_set(tp, NVRAM_BUFFERED);
11937                         break;
11938                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11939                         tp->nvram_jedecnum = JEDEC_ATMEL;
11940                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11941                         break;
11942                 case FLASH_VENDOR_ATMEL_EEPROM:
11943                         tp->nvram_jedecnum = JEDEC_ATMEL;
11944                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11945                         tg3_flag_set(tp, NVRAM_BUFFERED);
11946                         break;
11947                 case FLASH_VENDOR_ST:
11948                         tp->nvram_jedecnum = JEDEC_ST;
11949                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11950                         tg3_flag_set(tp, NVRAM_BUFFERED);
11951                         break;
11952                 case FLASH_VENDOR_SAIFUN:
11953                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11954                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11955                         break;
11956                 case FLASH_VENDOR_SST_SMALL:
11957                 case FLASH_VENDOR_SST_LARGE:
11958                         tp->nvram_jedecnum = JEDEC_SST;
11959                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11960                         break;
11961                 }
11962         } else {
11963                 tp->nvram_jedecnum = JEDEC_ATMEL;
11964                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11965                 tg3_flag_set(tp, NVRAM_BUFFERED);
11966         }
11967 }
11968
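/* Decode the NVRAM_CFG1 page-size field into a byte count; the 264-
 * and 528-byte sizes are Atmel DataFlash-style pages, the only ones
 * that use NVRAM address translation.
 */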
11969 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11970 {
11971         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11972         case FLASH_5752PAGE_SIZE_256:
11973                 tp->nvram_pagesize = 256;
11974                 break;
11975         case FLASH_5752PAGE_SIZE_512:
11976                 tp->nvram_pagesize = 512;
11977                 break;
11978         case FLASH_5752PAGE_SIZE_1K:
11979                 tp->nvram_pagesize = 1024;
11980                 break;
11981         case FLASH_5752PAGE_SIZE_2K:
11982                 tp->nvram_pagesize = 2048;
11983                 break;
11984         case FLASH_5752PAGE_SIZE_4K:
11985                 tp->nvram_pagesize = 4096;
11986                 break;
11987         case FLASH_5752PAGE_SIZE_264:
11988                 tp->nvram_pagesize = 264;
11989                 break;
11990         case FLASH_5752PAGE_SIZE_528:
11991                 tp->nvram_pagesize = 528;
11992                 break;
11993         }
11994 }
11995
11996 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11997 {
11998         u32 nvcfg1;
11999
12000         nvcfg1 = tr32(NVRAM_CFG1);
12001
12002         /* NVRAM protection for TPM */
12003         if (nvcfg1 & (1 << 27))
12004                 tg3_flag_set(tp, PROTECTED_NVRAM);
12005
12006         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12007         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12008         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12009                 tp->nvram_jedecnum = JEDEC_ATMEL;
12010                 tg3_flag_set(tp, NVRAM_BUFFERED);
12011                 break;
12012         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12013                 tp->nvram_jedecnum = JEDEC_ATMEL;
12014                 tg3_flag_set(tp, NVRAM_BUFFERED);
12015                 tg3_flag_set(tp, FLASH);
12016                 break;
12017         case FLASH_5752VENDOR_ST_M45PE10:
12018         case FLASH_5752VENDOR_ST_M45PE20:
12019         case FLASH_5752VENDOR_ST_M45PE40:
12020                 tp->nvram_jedecnum = JEDEC_ST;
12021                 tg3_flag_set(tp, NVRAM_BUFFERED);
12022                 tg3_flag_set(tp, FLASH);
12023                 break;
12024         }
12025
12026         if (tg3_flag(tp, FLASH)) {
12027                 tg3_nvram_get_pagesize(tp, nvcfg1);
12028         } else {
12029                 /* For EEPROMs, set the pagesize to the maximum EEPROM size */
12030                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12031
12032                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12033                 tw32(NVRAM_CFG1, nvcfg1);
12034         }
12035 }
12036
12037 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12038 {
12039         u32 nvcfg1, protect = 0;
12040
12041         nvcfg1 = tr32(NVRAM_CFG1);
12042
12043         /* NVRAM protection for TPM */
12044         if (nvcfg1 & (1 << 27)) {
12045                 tg3_flag_set(tp, PROTECTED_NVRAM);
12046                 protect = 1;
12047         }
12048
12049         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12050         switch (nvcfg1) {
12051         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12052         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12053         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12054         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12055                 tp->nvram_jedecnum = JEDEC_ATMEL;
12056                 tg3_flag_set(tp, NVRAM_BUFFERED);
12057                 tg3_flag_set(tp, FLASH);
12058                 tp->nvram_pagesize = 264;
12059                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12060                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12061                         tp->nvram_size = (protect ? 0x3e200 :
12062                                           TG3_NVRAM_SIZE_512KB);
12063                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12064                         tp->nvram_size = (protect ? 0x1f200 :
12065                                           TG3_NVRAM_SIZE_256KB);
12066                 else
12067                         tp->nvram_size = (protect ? 0x1f200 :
12068                                           TG3_NVRAM_SIZE_128KB);
12069                 break;
12070         case FLASH_5752VENDOR_ST_M45PE10:
12071         case FLASH_5752VENDOR_ST_M45PE20:
12072         case FLASH_5752VENDOR_ST_M45PE40:
12073                 tp->nvram_jedecnum = JEDEC_ST;
12074                 tg3_flag_set(tp, NVRAM_BUFFERED);
12075                 tg3_flag_set(tp, FLASH);
12076                 tp->nvram_pagesize = 256;
12077                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12078                         tp->nvram_size = (protect ?
12079                                           TG3_NVRAM_SIZE_64KB :
12080                                           TG3_NVRAM_SIZE_128KB);
12081                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12082                         tp->nvram_size = (protect ?
12083                                           TG3_NVRAM_SIZE_64KB :
12084                                           TG3_NVRAM_SIZE_256KB);
12085                 else
12086                         tp->nvram_size = (protect ?
12087                                           TG3_NVRAM_SIZE_128KB :
12088                                           TG3_NVRAM_SIZE_512KB);
12089                 break;
12090         }
12091 }
12092
12093 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12094 {
12095         u32 nvcfg1;
12096
12097         nvcfg1 = tr32(NVRAM_CFG1);
12098
12099         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12100         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12101         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12102         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12103         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12104                 tp->nvram_jedecnum = JEDEC_ATMEL;
12105                 tg3_flag_set(tp, NVRAM_BUFFERED);
12106                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12107
12108                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12109                 tw32(NVRAM_CFG1, nvcfg1);
12110                 break;
12111         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12112         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12113         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12114         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12115                 tp->nvram_jedecnum = JEDEC_ATMEL;
12116                 tg3_flag_set(tp, NVRAM_BUFFERED);
12117                 tg3_flag_set(tp, FLASH);
12118                 tp->nvram_pagesize = 264;
12119                 break;
12120         case FLASH_5752VENDOR_ST_M45PE10:
12121         case FLASH_5752VENDOR_ST_M45PE20:
12122         case FLASH_5752VENDOR_ST_M45PE40:
12123                 tp->nvram_jedecnum = JEDEC_ST;
12124                 tg3_flag_set(tp, NVRAM_BUFFERED);
12125                 tg3_flag_set(tp, FLASH);
12126                 tp->nvram_pagesize = 256;
12127                 break;
12128         }
12129 }
12130
12131 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12132 {
12133         u32 nvcfg1, protect = 0;
12134
12135         nvcfg1 = tr32(NVRAM_CFG1);
12136
12137         /* NVRAM protection for TPM */
12138         if (nvcfg1 & (1 << 27)) {
12139                 tg3_flag_set(tp, PROTECTED_NVRAM);
12140                 protect = 1;
12141         }
12142
12143         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12144         switch (nvcfg1) {
12145         case FLASH_5761VENDOR_ATMEL_ADB021D:
12146         case FLASH_5761VENDOR_ATMEL_ADB041D:
12147         case FLASH_5761VENDOR_ATMEL_ADB081D:
12148         case FLASH_5761VENDOR_ATMEL_ADB161D:
12149         case FLASH_5761VENDOR_ATMEL_MDB021D:
12150         case FLASH_5761VENDOR_ATMEL_MDB041D:
12151         case FLASH_5761VENDOR_ATMEL_MDB081D:
12152         case FLASH_5761VENDOR_ATMEL_MDB161D:
12153                 tp->nvram_jedecnum = JEDEC_ATMEL;
12154                 tg3_flag_set(tp, NVRAM_BUFFERED);
12155                 tg3_flag_set(tp, FLASH);
12156                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12157                 tp->nvram_pagesize = 256;
12158                 break;
12159         case FLASH_5761VENDOR_ST_A_M45PE20:
12160         case FLASH_5761VENDOR_ST_A_M45PE40:
12161         case FLASH_5761VENDOR_ST_A_M45PE80:
12162         case FLASH_5761VENDOR_ST_A_M45PE16:
12163         case FLASH_5761VENDOR_ST_M_M45PE20:
12164         case FLASH_5761VENDOR_ST_M_M45PE40:
12165         case FLASH_5761VENDOR_ST_M_M45PE80:
12166         case FLASH_5761VENDOR_ST_M_M45PE16:
12167                 tp->nvram_jedecnum = JEDEC_ST;
12168                 tg3_flag_set(tp, NVRAM_BUFFERED);
12169                 tg3_flag_set(tp, FLASH);
12170                 tp->nvram_pagesize = 256;
12171                 break;
12172         }
12173
12174         if (protect) {
12175                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12176         } else {
12177                 switch (nvcfg1) {
12178                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12179                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12180                 case FLASH_5761VENDOR_ST_A_M45PE16:
12181                 case FLASH_5761VENDOR_ST_M_M45PE16:
12182                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12183                         break;
12184                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12185                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12186                 case FLASH_5761VENDOR_ST_A_M45PE80:
12187                 case FLASH_5761VENDOR_ST_M_M45PE80:
12188                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12189                         break;
12190                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12191                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12192                 case FLASH_5761VENDOR_ST_A_M45PE40:
12193                 case FLASH_5761VENDOR_ST_M_M45PE40:
12194                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12195                         break;
12196                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12197                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12198                 case FLASH_5761VENDOR_ST_A_M45PE20:
12199                 case FLASH_5761VENDOR_ST_M_M45PE20:
12200                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12201                         break;
12202                 }
12203         }
12204 }
12205
12206 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12207 {
12208         tp->nvram_jedecnum = JEDEC_ATMEL;
12209         tg3_flag_set(tp, NVRAM_BUFFERED);
12210         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12211 }
12212
12213 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12214 {
12215         u32 nvcfg1;
12216
12217         nvcfg1 = tr32(NVRAM_CFG1);
12218
12219         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12220         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12221         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12222                 tp->nvram_jedecnum = JEDEC_ATMEL;
12223                 tg3_flag_set(tp, NVRAM_BUFFERED);
12224                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12225
12226                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12227                 tw32(NVRAM_CFG1, nvcfg1);
12228                 return;
12229         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12230         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12231         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12232         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12233         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12234         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12235         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12236                 tp->nvram_jedecnum = JEDEC_ATMEL;
12237                 tg3_flag_set(tp, NVRAM_BUFFERED);
12238                 tg3_flag_set(tp, FLASH);
12239
12240                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12241                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12242                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12243                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12244                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12245                         break;
12246                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12247                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12248                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12249                         break;
12250                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12251                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12252                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12253                         break;
12254                 }
12255                 break;
12256         case FLASH_5752VENDOR_ST_M45PE10:
12257         case FLASH_5752VENDOR_ST_M45PE20:
12258         case FLASH_5752VENDOR_ST_M45PE40:
12259                 tp->nvram_jedecnum = JEDEC_ST;
12260                 tg3_flag_set(tp, NVRAM_BUFFERED);
12261                 tg3_flag_set(tp, FLASH);
12262
12263                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12264                 case FLASH_5752VENDOR_ST_M45PE10:
12265                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12266                         break;
12267                 case FLASH_5752VENDOR_ST_M45PE20:
12268                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12269                         break;
12270                 case FLASH_5752VENDOR_ST_M45PE40:
12271                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12272                         break;
12273                 }
12274                 break;
12275         default:
12276                 tg3_flag_set(tp, NO_NVRAM);
12277                 return;
12278         }
12279
12280         tg3_nvram_get_pagesize(tp, nvcfg1);
12281         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12282                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12283 }
12284
12285
12286 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12287 {
12288         u32 nvcfg1;
12289
12290         nvcfg1 = tr32(NVRAM_CFG1);
12291
12292         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12293         case FLASH_5717VENDOR_ATMEL_EEPROM:
12294         case FLASH_5717VENDOR_MICRO_EEPROM:
12295                 tp->nvram_jedecnum = JEDEC_ATMEL;
12296                 tg3_flag_set(tp, NVRAM_BUFFERED);
12297                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12298
12299                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12300                 tw32(NVRAM_CFG1, nvcfg1);
12301                 return;
12302         case FLASH_5717VENDOR_ATMEL_MDB011D:
12303         case FLASH_5717VENDOR_ATMEL_ADB011B:
12304         case FLASH_5717VENDOR_ATMEL_ADB011D:
12305         case FLASH_5717VENDOR_ATMEL_MDB021D:
12306         case FLASH_5717VENDOR_ATMEL_ADB021B:
12307         case FLASH_5717VENDOR_ATMEL_ADB021D:
12308         case FLASH_5717VENDOR_ATMEL_45USPT:
12309                 tp->nvram_jedecnum = JEDEC_ATMEL;
12310                 tg3_flag_set(tp, NVRAM_BUFFERED);
12311                 tg3_flag_set(tp, FLASH);
12312
12313                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12314                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12315                         /* Detect size with tg3_get_nvram_size() */
12316                         break;
12317                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12318                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12319                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12320                         break;
12321                 default:
12322                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12323                         break;
12324                 }
12325                 break;
12326         case FLASH_5717VENDOR_ST_M_M25PE10:
12327         case FLASH_5717VENDOR_ST_A_M25PE10:
12328         case FLASH_5717VENDOR_ST_M_M45PE10:
12329         case FLASH_5717VENDOR_ST_A_M45PE10:
12330         case FLASH_5717VENDOR_ST_M_M25PE20:
12331         case FLASH_5717VENDOR_ST_A_M25PE20:
12332         case FLASH_5717VENDOR_ST_M_M45PE20:
12333         case FLASH_5717VENDOR_ST_A_M45PE20:
12334         case FLASH_5717VENDOR_ST_25USPT:
12335         case FLASH_5717VENDOR_ST_45USPT:
12336                 tp->nvram_jedecnum = JEDEC_ST;
12337                 tg3_flag_set(tp, NVRAM_BUFFERED);
12338                 tg3_flag_set(tp, FLASH);
12339
12340                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12341                 case FLASH_5717VENDOR_ST_M_M25PE20:
12342                 case FLASH_5717VENDOR_ST_M_M45PE20:
12343                         /* Detect size with tg3_get_nvram_size() */
12344                         break;
12345                 case FLASH_5717VENDOR_ST_A_M25PE20:
12346                 case FLASH_5717VENDOR_ST_A_M45PE20:
12347                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12348                         break;
12349                 default:
12350                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12351                         break;
12352                 }
12353                 break;
12354         default:
12355                 tg3_flag_set(tp, NO_NVRAM);
12356                 return;
12357         }
12358
12359         tg3_nvram_get_pagesize(tp, nvcfg1);
12360         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12361                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12362 }
12363
12364 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12365 {
12366         u32 nvcfg1, nvmpinstrp;
12367
12368         nvcfg1 = tr32(NVRAM_CFG1);
12369         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12370
12371         switch (nvmpinstrp) {
12372         case FLASH_5720_EEPROM_HD:
12373         case FLASH_5720_EEPROM_LD:
12374                 tp->nvram_jedecnum = JEDEC_ATMEL;
12375                 tg3_flag_set(tp, NVRAM_BUFFERED);
12376
12377                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12378                 tw32(NVRAM_CFG1, nvcfg1);
12379                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12380                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12381                 else
12382                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12383                 return;
12384         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12385         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12386         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12387         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12388         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12389         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12390         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12391         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12392         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12393         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12394         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12395         case FLASH_5720VENDOR_ATMEL_45USPT:
12396                 tp->nvram_jedecnum = JEDEC_ATMEL;
12397                 tg3_flag_set(tp, NVRAM_BUFFERED);
12398                 tg3_flag_set(tp, FLASH);
12399
12400                 switch (nvmpinstrp) {
12401                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12402                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12403                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12404                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12405                         break;
12406                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12407                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12408                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12409                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12410                         break;
12411                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12412                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12413                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12414                         break;
12415                 default:
12416                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12417                         break;
12418                 }
12419                 break;
12420         case FLASH_5720VENDOR_M_ST_M25PE10:
12421         case FLASH_5720VENDOR_M_ST_M45PE10:
12422         case FLASH_5720VENDOR_A_ST_M25PE10:
12423         case FLASH_5720VENDOR_A_ST_M45PE10:
12424         case FLASH_5720VENDOR_M_ST_M25PE20:
12425         case FLASH_5720VENDOR_M_ST_M45PE20:
12426         case FLASH_5720VENDOR_A_ST_M25PE20:
12427         case FLASH_5720VENDOR_A_ST_M45PE20:
12428         case FLASH_5720VENDOR_M_ST_M25PE40:
12429         case FLASH_5720VENDOR_M_ST_M45PE40:
12430         case FLASH_5720VENDOR_A_ST_M25PE40:
12431         case FLASH_5720VENDOR_A_ST_M45PE40:
12432         case FLASH_5720VENDOR_M_ST_M25PE80:
12433         case FLASH_5720VENDOR_M_ST_M45PE80:
12434         case FLASH_5720VENDOR_A_ST_M25PE80:
12435         case FLASH_5720VENDOR_A_ST_M45PE80:
12436         case FLASH_5720VENDOR_ST_25USPT:
12437         case FLASH_5720VENDOR_ST_45USPT:
12438                 tp->nvram_jedecnum = JEDEC_ST;
12439                 tg3_flag_set(tp, NVRAM_BUFFERED);
12440                 tg3_flag_set(tp, FLASH);
12441
12442                 switch (nvmpinstrp) {
12443                 case FLASH_5720VENDOR_M_ST_M25PE20:
12444                 case FLASH_5720VENDOR_M_ST_M45PE20:
12445                 case FLASH_5720VENDOR_A_ST_M25PE20:
12446                 case FLASH_5720VENDOR_A_ST_M45PE20:
12447                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12448                         break;
12449                 case FLASH_5720VENDOR_M_ST_M25PE40:
12450                 case FLASH_5720VENDOR_M_ST_M45PE40:
12451                 case FLASH_5720VENDOR_A_ST_M25PE40:
12452                 case FLASH_5720VENDOR_A_ST_M45PE40:
12453                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12454                         break;
12455                 case FLASH_5720VENDOR_M_ST_M25PE80:
12456                 case FLASH_5720VENDOR_M_ST_M45PE80:
12457                 case FLASH_5720VENDOR_A_ST_M25PE80:
12458                 case FLASH_5720VENDOR_A_ST_M45PE80:
12459                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12460                         break;
12461                 default:
12462                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12463                         break;
12464                 }
12465                 break;
12466         default:
12467                 tg3_flag_set(tp, NO_NVRAM);
12468                 return;
12469         }
12470
12471         tg3_nvram_get_pagesize(tp, nvcfg1);
12472         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12473                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12474 }
12475
12476 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12477 static void __devinit tg3_nvram_init(struct tg3 *tp)
12478 {
12479         tw32_f(GRC_EEPROM_ADDR,
12480              (EEPROM_ADDR_FSM_RESET |
12481               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12482                EEPROM_ADDR_CLKPERD_SHIFT)));
12483
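	/* Give the just-reset EEPROM address state machine a moment to
	 * settle before issuing further accesses.
	 */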
12484         msleep(1);
12485
	/* Enable SEEPROM accesses. */
12487         tw32_f(GRC_LOCAL_CTRL,
12488              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12489         udelay(100);
12490
12491         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12492             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12493                 tg3_flag_set(tp, NVRAM);
12494
12495                 if (tg3_nvram_lock(tp)) {
12496                         netdev_warn(tp->dev,
12497                                     "Cannot get nvram lock, %s failed\n",
12498                                     __func__);
12499                         return;
12500                 }
12501                 tg3_enable_nvram_access(tp);
12502
12503                 tp->nvram_size = 0;
12504
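		/* Each ASIC generation encodes its NVRAM strapping
		 * differently; dispatch to the matching decoder, or fall
		 * back to the generic probe for older parts.
		 */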
12505                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12506                         tg3_get_5752_nvram_info(tp);
12507                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12508                         tg3_get_5755_nvram_info(tp);
12509                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12510                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12511                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12512                         tg3_get_5787_nvram_info(tp);
12513                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12514                         tg3_get_5761_nvram_info(tp);
12515                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12516                         tg3_get_5906_nvram_info(tp);
12517                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12518                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12519                         tg3_get_57780_nvram_info(tp);
12520                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12521                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12522                         tg3_get_5717_nvram_info(tp);
12523                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12524                         tg3_get_5720_nvram_info(tp);
12525                 else
12526                         tg3_get_nvram_info(tp);
12527
12528                 if (tp->nvram_size == 0)
12529                         tg3_get_nvram_size(tp);
12530
12531                 tg3_disable_nvram_access(tp);
12532                 tg3_nvram_unlock(tp);
12533
12534         } else {
12535                 tg3_flag_clear(tp, NVRAM);
12536                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12537
12538                 tg3_get_eeprom_size(tp);
12539         }
12540 }
12541
12542 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12543                                     u32 offset, u32 len, u8 *buf)
12544 {
12545         int i, j, rc = 0;
12546         u32 val;
12547
12548         for (i = 0; i < len; i += 4) {
12549                 u32 addr;
12550                 __be32 data;
12551
12552                 addr = offset + i;
12553
12554                 memcpy(&data, buf + i, 4);
12555
12556                 /*
12557                  * The SEEPROM interface expects the data to always be opposite
12558                  * the native endian format.  We accomplish this by reversing
12559                  * all the operations that would have been performed on the
12560                  * data from a call to tg3_nvram_read_be32().
12561                  */
12562                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12563
12564                 val = tr32(GRC_EEPROM_ADDR);
12565                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12566
12567                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12568                         EEPROM_ADDR_READ);
12569                 tw32(GRC_EEPROM_ADDR, val |
12570                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12571                         (addr & EEPROM_ADDR_ADDR_MASK) |
12572                         EEPROM_ADDR_START |
12573                         EEPROM_ADDR_WRITE);
12574
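		/* Poll for completion; allow each word write up to one
		 * second (1000 polls at 1 ms) before giving up.
		 */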
12575                 for (j = 0; j < 1000; j++) {
12576                         val = tr32(GRC_EEPROM_ADDR);
12577
12578                         if (val & EEPROM_ADDR_COMPLETE)
12579                                 break;
12580                         msleep(1);
12581                 }
12582                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12583                         rc = -EBUSY;
12584                         break;
12585                 }
12586         }
12587
12588         return rc;
12589 }
12590
12591 /* offset and length are dword aligned */
12592 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12593                 u8 *buf)
12594 {
12595         int ret = 0;
12596         u32 pagesize = tp->nvram_pagesize;
12597         u32 pagemask = pagesize - 1;
12598         u32 nvram_cmd;
12599         u8 *tmp;
12600
12601         tmp = kmalloc(pagesize, GFP_KERNEL);
12602         if (tmp == NULL)
12603                 return -ENOMEM;
12604
12605         while (len) {
12606                 int j;
12607                 u32 phy_addr, page_off, size;
12608
12609                 phy_addr = offset & ~pagemask;
12610
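		/* Read-modify-write: fetch the whole page containing
		 * 'offset' so the new bytes can be merged in before the
		 * page is erased and rewritten below.
		 */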
12611                 for (j = 0; j < pagesize; j += 4) {
12612                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12613                                                   (__be32 *) (tmp + j));
12614                         if (ret)
12615                                 break;
12616                 }
12617                 if (ret)
12618                         break;
12619
12620                 page_off = offset & pagemask;
12621                 size = pagesize;
12622                 if (len < size)
12623                         size = len;
12624
12625                 len -= size;
12626
12627                 memcpy(tmp + page_off, buf, size);
12628
12629                 offset = offset + (pagesize - page_off);
12630
12631                 tg3_enable_nvram_access(tp);
12632
12633                 /*
12634                  * Before we can erase the flash page, we need
12635                  * to issue a special "write enable" command.
12636                  */
12637                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12638
12639                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12640                         break;
12641
12642                 /* Erase the target page */
12643                 tw32(NVRAM_ADDR, phy_addr);
12644
12645                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12646                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12647
12648                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12649                         break;
12650
12651                 /* Issue another write enable to start the write. */
12652                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12653
12654                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12655                         break;
12656
12657                 for (j = 0; j < pagesize; j += 4) {
12658                         __be32 data;
12659
12660                         data = *((__be32 *) (tmp + j));
12661
12662                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12663
12664                         tw32(NVRAM_ADDR, phy_addr + j);
12665
12666                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12667                                 NVRAM_CMD_WR;
12668
12669                         if (j == 0)
12670                                 nvram_cmd |= NVRAM_CMD_FIRST;
12671                         else if (j == (pagesize - 4))
12672                                 nvram_cmd |= NVRAM_CMD_LAST;
12673
			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
12676                 }
12677                 if (ret)
12678                         break;
12679         }
12680
12681         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12682         tg3_nvram_exec_cmd(tp, nvram_cmd);
12683
12684         kfree(tmp);
12685
12686         return ret;
12687 }
12688
12689 /* offset and length are dword aligned */
12690 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12691                 u8 *buf)
12692 {
12693         int i, ret = 0;
12694
12695         for (i = 0; i < len; i += 4, offset += 4) {
12696                 u32 page_off, phy_addr, nvram_cmd;
12697                 __be32 data;
12698
12699                 memcpy(&data, buf + i, 4);
12700                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12701
12702                 page_off = offset % tp->nvram_pagesize;
12703
12704                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12705
12706                 tw32(NVRAM_ADDR, phy_addr);
12707
12708                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12709
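		/* Frame the burst for the NVRAM state machine: FIRST on
		 * the first word of a page or of the transfer, LAST on
		 * the final word of a page or of the transfer.
		 */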
12710                 if (page_off == 0 || i == 0)
12711                         nvram_cmd |= NVRAM_CMD_FIRST;
12712                 if (page_off == (tp->nvram_pagesize - 4))
12713                         nvram_cmd |= NVRAM_CMD_LAST;
12714
12715                 if (i == (len - 4))
12716                         nvram_cmd |= NVRAM_CMD_LAST;
12717
12718                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12719                     !tg3_flag(tp, 5755_PLUS) &&
12720                     (tp->nvram_jedecnum == JEDEC_ST) &&
12721                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12722
			ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
						 NVRAM_CMD_GO |
						 NVRAM_CMD_DONE);
			if (ret)
				break;
12728                 }
12729                 if (!tg3_flag(tp, FLASH)) {
12730                         /* We always do complete word writes to eeprom. */
12731                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12732                 }
12733
		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
12736         }
12737         return ret;
12738 }
12739
12740 /* offset and length are dword aligned */
12741 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12742 {
12743         int ret;
12744
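	/* On EEPROM_WRITE_PROT boards, GPIO1 appears to gate the external
	 * write-protect pin: release it here and restore it once the
	 * write has finished.
	 */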
12745         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12746                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12747                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12748                 udelay(40);
12749         }
12750
12751         if (!tg3_flag(tp, NVRAM)) {
12752                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12753         } else {
12754                 u32 grc_mode;
12755
12756                 ret = tg3_nvram_lock(tp);
12757                 if (ret)
12758                         return ret;
12759
12760                 tg3_enable_nvram_access(tp);
12761                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12762                         tw32(NVRAM_WRITE1, 0x406);
12763
12764                 grc_mode = tr32(GRC_MODE);
12765                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12766
12767                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12768                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12769                                 buf);
12770                 } else {
12771                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12772                                 buf);
12773                 }
12774
12775                 grc_mode = tr32(GRC_MODE);
12776                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12777
12778                 tg3_disable_nvram_access(tp);
12779                 tg3_nvram_unlock(tp);
12780         }
12781
12782         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12783                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12784                 udelay(40);
12785         }
12786
12787         return ret;
12788 }
12789
12790 struct subsys_tbl_ent {
12791         u16 subsys_vendor, subsys_devid;
12792         u32 phy_id;
12793 };
12794
12795 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12796         /* Broadcom boards. */
12797         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12798           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12799         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12800           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12801         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12802           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12803         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12804           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12805         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12806           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12807         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12808           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12809         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12810           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12811         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12812           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12813         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12814           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12815         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12816           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12817         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12818           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12819
12820         /* 3com boards. */
12821         { TG3PCI_SUBVENDOR_ID_3COM,
12822           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12823         { TG3PCI_SUBVENDOR_ID_3COM,
12824           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12825         { TG3PCI_SUBVENDOR_ID_3COM,
12826           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12827         { TG3PCI_SUBVENDOR_ID_3COM,
12828           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12829         { TG3PCI_SUBVENDOR_ID_3COM,
12830           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12831
12832         /* DELL boards. */
12833         { TG3PCI_SUBVENDOR_ID_DELL,
12834           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12835         { TG3PCI_SUBVENDOR_ID_DELL,
12836           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12837         { TG3PCI_SUBVENDOR_ID_DELL,
12838           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12839         { TG3PCI_SUBVENDOR_ID_DELL,
12840           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12841
12842         /* Compaq boards. */
12843         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12844           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12845         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12846           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12847         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12848           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12849         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12850           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12851         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12852           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12853
12854         /* IBM boards. */
12855         { TG3PCI_SUBVENDOR_ID_IBM,
12856           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12857 };
12858
12859 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12860 {
12861         int i;
12862
12863         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12864                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12865                      tp->pdev->subsystem_vendor) &&
12866                     (subsys_id_to_phy_id[i].subsys_devid ==
12867                      tp->pdev->subsystem_device))
12868                         return &subsys_id_to_phy_id[i];
12869         }
12870         return NULL;
12871 }
12872
12873 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12874 {
12875         u32 val;
12876
12877         tp->phy_id = TG3_PHY_ID_INVALID;
12878         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12879
	/* Assume an onboard device that is WOL-capable by default. */
12881         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12882         tg3_flag_set(tp, WOL_CAP);
12883
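	/* 5906-class parts keep their configuration in the VCPU shadow
	 * register rather than in NIC SRAM, so handle them separately.
	 */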
12884         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12885                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12886                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12887                         tg3_flag_set(tp, IS_NIC);
12888                 }
12889                 val = tr32(VCPU_CFGSHDW);
12890                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12891                         tg3_flag_set(tp, ASPM_WORKAROUND);
12892                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12893                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12894                         tg3_flag_set(tp, WOL_ENABLE);
12895                         device_set_wakeup_enable(&tp->pdev->dev, true);
12896                 }
12897                 goto done;
12898         }
12899
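	/* Bootcode leaves a magic signature in NIC SRAM once it has
	 * deposited a valid hardware configuration there; without it,
	 * the defaults chosen above stand.
	 */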
12900         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12901         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12902                 u32 nic_cfg, led_cfg;
12903                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12904                 int eeprom_phy_serdes = 0;
12905
12906                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12907                 tp->nic_sram_data_cfg = nic_cfg;
12908
12909                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12910                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12911                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12912                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12913                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12914                     (ver > 0) && (ver < 0x100))
12915                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12916
12917                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12918                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12919
12920                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12921                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12922                         eeprom_phy_serdes = 1;
12923
12924                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12925                 if (nic_phy_id != 0) {
12926                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12927                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12928
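			/* Repack the two SRAM halves into the same layout
			 * tg3_phy_probe() assembles from the MII_PHYSID1/2
			 * registers.
			 */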
12929                         eeprom_phy_id  = (id1 >> 16) << 10;
12930                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12931                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12932                 } else
12933                         eeprom_phy_id = 0;
12934
12935                 tp->phy_id = eeprom_phy_id;
12936                 if (eeprom_phy_serdes) {
12937                         if (!tg3_flag(tp, 5705_PLUS))
12938                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12939                         else
12940                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12941                 }
12942
12943                 if (tg3_flag(tp, 5750_PLUS))
12944                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12945                                     SHASTA_EXT_LED_MODE_MASK);
12946                 else
12947                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12948
12949                 switch (led_cfg) {
12950                 default:
12951                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12952                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12953                         break;
12954
12955                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12956                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12957                         break;
12958
12959                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12960                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12961
			/* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
			 * as happens with some older 5700/5701 bootcode.
			 */
12965                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12966                             ASIC_REV_5700 ||
12967                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12968                             ASIC_REV_5701)
12969                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12970
12971                         break;
12972
12973                 case SHASTA_EXT_LED_SHARED:
12974                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12975                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12976                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12977                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12978                                                  LED_CTRL_MODE_PHY_2);
12979                         break;
12980
12981                 case SHASTA_EXT_LED_MAC:
12982                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12983                         break;
12984
12985                 case SHASTA_EXT_LED_COMBO:
12986                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12987                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12988                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12989                                                  LED_CTRL_MODE_PHY_2);
12990                         break;
12991
12992                 }
12993
12994                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12995                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12996                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12997                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12998
12999                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13000                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13001
13002                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13003                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13004                         if ((tp->pdev->subsystem_vendor ==
13005                              PCI_VENDOR_ID_ARIMA) &&
13006                             (tp->pdev->subsystem_device == 0x205a ||
13007                              tp->pdev->subsystem_device == 0x2063))
13008                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13009                 } else {
13010                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13011                         tg3_flag_set(tp, IS_NIC);
13012                 }
13013
13014                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13015                         tg3_flag_set(tp, ENABLE_ASF);
13016                         if (tg3_flag(tp, 5750_PLUS))
13017                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13018                 }
13019
13020                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13021                     tg3_flag(tp, 5750_PLUS))
13022                         tg3_flag_set(tp, ENABLE_APE);
13023
13024                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13025                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13026                         tg3_flag_clear(tp, WOL_CAP);
13027
13028                 if (tg3_flag(tp, WOL_CAP) &&
13029                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13030                         tg3_flag_set(tp, WOL_ENABLE);
13031                         device_set_wakeup_enable(&tp->pdev->dev, true);
13032                 }
13033
13034                 if (cfg2 & (1 << 17))
13035                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13036
		/* Serdes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
13039                 if (cfg2 & (1 << 18))
13040                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13041
13042                 if ((tg3_flag(tp, 57765_PLUS) ||
13043                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13044                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13045                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13046                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13047
13048                 if (tg3_flag(tp, PCI_EXPRESS) &&
13049                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13050                     !tg3_flag(tp, 57765_PLUS)) {
13051                         u32 cfg3;
13052
13053                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13054                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13055                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13056                 }
13057
13058                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13059                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13060                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13061                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13062                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13063                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13064         }
13065 done:
13066         if (tg3_flag(tp, WOL_CAP))
13067                 device_set_wakeup_enable(&tp->pdev->dev,
13068                                          tg3_flag(tp, WOL_ENABLE));
13069         else
13070                 device_set_wakeup_capable(&tp->pdev->dev, false);
13071 }
13072
13073 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13074 {
13075         int i;
13076         u32 val;
13077
13078         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13079         tw32(OTP_CTRL, cmd);
13080
13081         /* Wait for up to 1 ms for command to execute. */
13082         for (i = 0; i < 100; i++) {
13083                 val = tr32(OTP_STATUS);
13084                 if (val & OTP_STATUS_CMD_DONE)
13085                         break;
13086                 udelay(10);
13087         }
13088
13089         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13090 }
13091
13092 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13093  * configuration is a 32-bit value that straddles the alignment boundary.
13094  * We do two 32-bit reads and then shift and merge the results.
13095  */
13096 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13097 {
13098         u32 bhalf_otp, thalf_otp;
13099
13100         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13101
13102         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13103                 return 0;
13104
13105         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13106
13107         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13108                 return 0;
13109
13110         thalf_otp = tr32(OTP_READ_DATA);
13111
13112         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13113
13114         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13115                 return 0;
13116
13117         bhalf_otp = tr32(OTP_READ_DATA);
13118
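	/* Glue the low word of the top half to the high word of the
	 * bottom half to recover the straddling 32-bit value.
	 */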
13119         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13120 }
13121
13122 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13123 {
13124         u32 adv = ADVERTISED_Autoneg |
13125                   ADVERTISED_Pause;
13126
13127         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13128                 adv |= ADVERTISED_1000baseT_Half |
13129                        ADVERTISED_1000baseT_Full;
13130
13131         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13132                 adv |= ADVERTISED_100baseT_Half |
13133                        ADVERTISED_100baseT_Full |
13134                        ADVERTISED_10baseT_Half |
13135                        ADVERTISED_10baseT_Full |
13136                        ADVERTISED_TP;
13137         else
13138                 adv |= ADVERTISED_FIBRE;
13139
13140         tp->link_config.advertising = adv;
13141         tp->link_config.speed = SPEED_INVALID;
13142         tp->link_config.duplex = DUPLEX_INVALID;
13143         tp->link_config.autoneg = AUTONEG_ENABLE;
13144         tp->link_config.active_speed = SPEED_INVALID;
13145         tp->link_config.active_duplex = DUPLEX_INVALID;
13146         tp->link_config.orig_speed = SPEED_INVALID;
13147         tp->link_config.orig_duplex = DUPLEX_INVALID;
13148         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13149 }
13150
13151 static int __devinit tg3_phy_probe(struct tg3 *tp)
13152 {
13153         u32 hw_phy_id_1, hw_phy_id_2;
13154         u32 hw_phy_id, hw_phy_id_masked;
13155         int err;
13156
13157         /* flow control autonegotiation is default behavior */
13158         tg3_flag_set(tp, PAUSE_AUTONEG);
13159         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13160
13161         if (tg3_flag(tp, USE_PHYLIB))
13162                 return tg3_phy_init(tp);
13163
13164         /* Reading the PHY ID register can conflict with ASF
13165          * firmware access to the PHY hardware.
13166          */
13167         err = 0;
13168         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13169                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13170         } else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY ID found in the eeprom area and, failing
		 * that, to the hard-coded subsystem ID table.
		 */
13176                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13177                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13178
13179                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13180                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13181                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13182
13183                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13184         }
13185
13186         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13187                 tp->phy_id = hw_phy_id;
13188                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13189                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13190                 else
13191                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13192         } else {
13193                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13194                         /* Do nothing, phy ID already set up in
13195                          * tg3_get_eeprom_hw_cfg().
13196                          */
13197                 } else {
13198                         struct subsys_tbl_ent *p;
13199
13200                         /* No eeprom signature?  Try the hardcoded
13201                          * subsys device table.
13202                          */
13203                         p = tg3_lookup_by_subsys(tp);
13204                         if (!p)
13205                                 return -ENODEV;
13206
13207                         tp->phy_id = p->phy_id;
13208                         if (!tp->phy_id ||
13209                             tp->phy_id == TG3_PHY_ID_BCM8002)
13210                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13211                 }
13212         }
13213
13214         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13215             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13216              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13217              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13218               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13219              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13220               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13221                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13222
13223         tg3_phy_init_link_config(tp);
13224
13225         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13226             !tg3_flag(tp, ENABLE_APE) &&
13227             !tg3_flag(tp, ENABLE_ASF)) {
13228                 u32 bmsr, mask;
13229
13230                 tg3_readphy(tp, MII_BMSR, &bmsr);
13231                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13232                     (bmsr & BMSR_LSTATUS))
13233                         goto skip_phy_reset;
13234
13235                 err = tg3_phy_reset(tp);
13236                 if (err)
13237                         return err;
13238
13239                 tg3_phy_set_wirespeed(tp);
13240
13241                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13242                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13243                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13244                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13245                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13246                                             tp->link_config.flowctrl);
13247
13248                         tg3_writephy(tp, MII_BMCR,
13249                                      BMCR_ANENABLE | BMCR_ANRESTART);
13250                 }
13251         }
13252
13253 skip_phy_reset:
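	/* The 5401 DSP setup is deliberately run twice; the second pass
	 * appears to re-apply the coefficients once the first has taken
	 * effect.
	 */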
13254         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13255                 err = tg3_init_5401phy_dsp(tp);
13256                 if (err)
13257                         return err;
13258
13259                 err = tg3_init_5401phy_dsp(tp);
13260         }
13261
13262         return err;
13263 }
13264
13265 static void __devinit tg3_read_vpd(struct tg3 *tp)
13266 {
13267         u8 *vpd_data;
13268         unsigned int block_end, rosize, len;
13269         u32 vpdlen;
13270         int j, i = 0;
13271
13272         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13273         if (!vpd_data)
13274                 goto out_no_vpd;
13275
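	/* Walk the VPD image: find the read-only LRDT section, then pull
	 * keywords from it.  The "1028" MFR_ID match below looks like a
	 * Dell-specific gate on the VENDOR0 firmware string; PN supplies
	 * the board part number.
	 */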
13276         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13277         if (i < 0)
13278                 goto out_not_found;
13279
13280         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13281         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13282         i += PCI_VPD_LRDT_TAG_SIZE;
13283
13284         if (block_end > vpdlen)
13285                 goto out_not_found;
13286
13287         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13288                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13289         if (j > 0) {
13290                 len = pci_vpd_info_field_size(&vpd_data[j]);
13291
13292                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13293                 if (j + len > block_end || len != 4 ||
13294                     memcmp(&vpd_data[j], "1028", 4))
13295                         goto partno;
13296
13297                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13298                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13299                 if (j < 0)
13300                         goto partno;
13301
13302                 len = pci_vpd_info_field_size(&vpd_data[j]);
13303
13304                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13305                 if (j + len > block_end)
13306                         goto partno;
13307
13308                 memcpy(tp->fw_ver, &vpd_data[j], len);
13309                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13310         }
13311
13312 partno:
13313         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13314                                       PCI_VPD_RO_KEYWORD_PARTNO);
13315         if (i < 0)
13316                 goto out_not_found;
13317
13318         len = pci_vpd_info_field_size(&vpd_data[i]);
13319
13320         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13321         if (len > TG3_BPN_SIZE ||
13322             (len + i) > vpdlen)
13323                 goto out_not_found;
13324
13325         memcpy(tp->board_part_number, &vpd_data[i], len);
13326
13327 out_not_found:
13328         kfree(vpd_data);
13329         if (tp->board_part_number[0])
13330                 return;
13331
13332 out_no_vpd:
13333         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13334                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13335                         strcpy(tp->board_part_number, "BCM5717");
13336                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13337                         strcpy(tp->board_part_number, "BCM5718");
13338                 else
13339                         goto nomatch;
13340         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13341                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13342                         strcpy(tp->board_part_number, "BCM57780");
13343                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13344                         strcpy(tp->board_part_number, "BCM57760");
13345                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13346                         strcpy(tp->board_part_number, "BCM57790");
13347                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13348                         strcpy(tp->board_part_number, "BCM57788");
13349                 else
13350                         goto nomatch;
13351         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13352                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13353                         strcpy(tp->board_part_number, "BCM57761");
13354                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13355                         strcpy(tp->board_part_number, "BCM57765");
13356                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13357                         strcpy(tp->board_part_number, "BCM57781");
13358                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13359                         strcpy(tp->board_part_number, "BCM57785");
13360                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13361                         strcpy(tp->board_part_number, "BCM57791");
13362                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13363                         strcpy(tp->board_part_number, "BCM57795");
13364                 else
13365                         goto nomatch;
13366         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13367                 strcpy(tp->board_part_number, "BCM95906");
13368         } else {
13369 nomatch:
13370                 strcpy(tp->board_part_number, "none");
13371         }
13372 }
13373
13374 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13375 {
13376         u32 val;
13377
13378         if (tg3_nvram_read(tp, offset, &val) ||
13379             (val & 0xfc000000) != 0x0c000000 ||
13380             tg3_nvram_read(tp, offset + 4, &val) ||
13381             val != 0)
13382                 return 0;
13383
13384         return 1;
13385 }
13386
13387 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13388 {
13389         u32 val, offset, start, ver_offset;
13390         int i, dst_off;
13391         bool newver = false;
13392
13393         if (tg3_nvram_read(tp, 0xc, &offset) ||
13394             tg3_nvram_read(tp, 0x4, &start))
13395                 return;
13396
13397         offset = tg3_nvram_logical_addr(tp, offset);
13398
13399         if (tg3_nvram_read(tp, offset, &val))
13400                 return;
13401
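	/* A first word whose high bits match 0x0c000000, followed by a
	 * zero word, marks the newer image layout, which carries a
	 * 16-byte version string instead of packed major/minor fields.
	 */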
13402         if ((val & 0xfc000000) == 0x0c000000) {
13403                 if (tg3_nvram_read(tp, offset + 4, &val))
13404                         return;
13405
13406                 if (val == 0)
13407                         newver = true;
13408         }
13409
13410         dst_off = strlen(tp->fw_ver);
13411
13412         if (newver) {
13413                 if (TG3_VER_SIZE - dst_off < 16 ||
13414                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13415                         return;
13416
13417                 offset = offset + ver_offset - start;
13418                 for (i = 0; i < 16; i += 4) {
13419                         __be32 v;
13420                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13421                                 return;
13422
13423                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13424                 }
13425         } else {
13426                 u32 major, minor;
13427
13428                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13429                         return;
13430
13431                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13432                         TG3_NVM_BCVER_MAJSFT;
13433                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13434                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13435                          "v%d.%02d", major, minor);
13436         }
13437 }
13438
13439 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13440 {
13441         u32 val, major, minor;
13442
13443         /* Use native endian representation */
13444         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13445                 return;
13446
13447         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13448                 TG3_NVM_HWSB_CFG1_MAJSFT;
13449         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13450                 TG3_NVM_HWSB_CFG1_MINSFT;
13451
	snprintf(tp->fw_ver, TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13453 }
13454
13455 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13456 {
13457         u32 offset, major, minor, build;
13458
13459         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13460
13461         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13462                 return;
13463
13464         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13465         case TG3_EEPROM_SB_REVISION_0:
13466                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13467                 break;
13468         case TG3_EEPROM_SB_REVISION_2:
13469                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13470                 break;
13471         case TG3_EEPROM_SB_REVISION_3:
13472                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13473                 break;
13474         case TG3_EEPROM_SB_REVISION_4:
13475                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13476                 break;
13477         case TG3_EEPROM_SB_REVISION_5:
13478                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13479                 break;
13480         case TG3_EEPROM_SB_REVISION_6:
13481                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13482                 break;
13483         default:
13484                 return;
13485         }
13486
13487         if (tg3_nvram_read(tp, offset, &val))
13488                 return;
13489
13490         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13491                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13492         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13493                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13494         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13495
13496         if (minor > 99 || build > 26)
13497                 return;
13498
13499         offset = strlen(tp->fw_ver);
13500         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13501                  " v%d.%02d", major, minor);
13502
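	/* Non-zero builds get a letter suffix, 'a' for build 1 through
	 * 'z' for build 26 (hence the bounds check above).
	 */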
13503         if (build > 0) {
13504                 offset = strlen(tp->fw_ver);
13505                 if (offset < TG3_VER_SIZE - 1)
13506                         tp->fw_ver[offset] = 'a' + build - 1;
13507         }
13508 }
13509
13510 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13511 {
13512         u32 val, offset, start;
13513         int i, vlen;
13514
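	/* Scan the NVRAM directory for the ASF initialization entry; its
	 * payload locates the management firmware image whose version
	 * string is appended below.
	 */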
13515         for (offset = TG3_NVM_DIR_START;
13516              offset < TG3_NVM_DIR_END;
13517              offset += TG3_NVM_DIRENT_SIZE) {
13518                 if (tg3_nvram_read(tp, offset, &val))
13519                         return;
13520
13521                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13522                         break;
13523         }
13524
13525         if (offset == TG3_NVM_DIR_END)
13526                 return;
13527
13528         if (!tg3_flag(tp, 5705_PLUS))
13529                 start = 0x08000000;
13530         else if (tg3_nvram_read(tp, offset - 4, &start))
13531                 return;
13532
13533         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13534             !tg3_fw_img_is_valid(tp, offset) ||
13535             tg3_nvram_read(tp, offset + 8, &val))
13536                 return;
13537
13538         offset += val - start;
13539
13540         vlen = strlen(tp->fw_ver);
13541
13542         tp->fw_ver[vlen++] = ',';
13543         tp->fw_ver[vlen++] = ' ';
13544
13545         for (i = 0; i < 4; i++) {
13546                 __be32 v;
13547                 if (tg3_nvram_read_be32(tp, offset, &v))
13548                         return;
13549
13550                 offset += sizeof(v);
13551
13552                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13553                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13554                         break;
13555                 }
13556
13557                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13558                 vlen += sizeof(v);
13559         }
13560 }
13561
13562 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13563 {
13564         int vlen;
13565         u32 apedata;
13566         char *fwtype;
13567
13568         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13569                 return;
13570
13571         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13572         if (apedata != APE_SEG_SIG_MAGIC)
13573                 return;
13574
13575         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13576         if (!(apedata & APE_FW_STATUS_READY))
13577                 return;
13578
13579         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13580
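	/* The feature bits distinguish NCSI firmware from plain DASH;
	 * remember which one is running for later APE handling.
	 */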
13581         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13582                 tg3_flag_set(tp, APE_HAS_NCSI);
13583                 fwtype = "NCSI";
13584         } else {
13585                 fwtype = "DASH";
13586         }
13587
13588         vlen = strlen(tp->fw_ver);
13589
13590         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13591                  fwtype,
13592                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13593                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13594                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13595                  (apedata & APE_FW_VERSION_BLDMSK));
13596 }
13597
13598 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13599 {
13600         u32 val;
13601         bool vpd_vers = false;
13602
13603         if (tp->fw_ver[0] != 0)
13604                 vpd_vers = true;
13605
13606         if (tg3_flag(tp, NO_NVRAM)) {
13607                 strcat(tp->fw_ver, "sb");
13608                 return;
13609         }
13610
13611         if (tg3_nvram_read(tp, 0, &val))
13612                 return;
13613
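	/* The magic value in the first NVRAM word identifies the image
	 * format: full bootcode, selfboot ("sb"), or hardware selfboot
	 * ("hwsb").
	 */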
13614         if (val == TG3_EEPROM_MAGIC)
13615                 tg3_read_bc_ver(tp);
13616         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13617                 tg3_read_sb_ver(tp, val);
13618         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13619                 tg3_read_hwsb_ver(tp);
13620         else
13621                 return;
13622
13623         if (vpd_vers)
13624                 goto done;
13625
13626         if (tg3_flag(tp, ENABLE_APE)) {
13627                 if (tg3_flag(tp, ENABLE_ASF))
13628                         tg3_read_dash_ver(tp);
13629         } else if (tg3_flag(tp, ENABLE_ASF)) {
13630                 tg3_read_mgmtfw_ver(tp);
13631         }
13632
13633 done:
13634         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13635 }
13636
13637 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13638
13639 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13640 {
13641         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13642                 return TG3_RX_RET_MAX_SIZE_5717;
13643         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13644                 return TG3_RX_RET_MAX_SIZE_5700;
13645         else
13646                 return TG3_RX_RET_MAX_SIZE_5705;
13647 }
13648
13649 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13650         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13651         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13652         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13653         { },
13654 };
13655
13656 static int __devinit tg3_get_invariants(struct tg3 *tp)
13657 {
13658         u32 misc_ctrl_reg;
13659         u32 pci_state_reg, grc_misc_cfg;
13660         u32 val;
13661         u16 pci_cmd;
13662         int err;
13663
13664         /* Force memory write invalidate off.  If we leave it on,
13665          * then on 5700_BX chips we have to enable a workaround.
13666          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it never uses
	 * it.  This seems to suggest that the workaround is insufficient.
13670          */
13671         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13672         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13673         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13674
13675         /* Important! -- Make sure register accesses are byteswapped
13676          * correctly.  Also, for those chips that require it, make
13677          * sure that indirect register accesses are enabled before
13678          * the first operation.
13679          */
13680         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13681                               &misc_ctrl_reg);
13682         tp->misc_host_ctrl |= (misc_ctrl_reg &
13683                                MISC_HOST_CTRL_CHIPREV);
13684         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13685                                tp->misc_host_ctrl);
13686
13687         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13688                                MISC_HOST_CTRL_CHIPREV_SHIFT);
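	/* Newer chips leave a sentinel in the MISC_HOST_CTRL chip-rev
	 * field and publish the real ASIC revision in a product-ID
	 * config register whose location depends on the device family.
	 */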
13689         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13690                 u32 prod_id_asic_rev;
13691
13692                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13693                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13694                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13695                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13696                         pci_read_config_dword(tp->pdev,
13697                                               TG3PCI_GEN2_PRODID_ASICREV,
13698                                               &prod_id_asic_rev);
13699                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13700                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13701                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13702                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13703                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13704                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13705                         pci_read_config_dword(tp->pdev,
13706                                               TG3PCI_GEN15_PRODID_ASICREV,
13707                                               &prod_id_asic_rev);
13708                 else
13709                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13710                                               &prod_id_asic_rev);
13711
13712                 tp->pci_chip_rev_id = prod_id_asic_rev;
13713         }
13714
13715         /* Wrong chip ID in 5752 A0. This code can be removed later
13716          * as A0 is not in production.
13717          */
13718         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13719                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13720
13721         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13722          * we need to disable memory and use config. cycles
13723          * only to access all registers. The 5702/03 chips
13724          * can mistakenly decode the special cycles from the
13725          * ICH chipsets as memory write cycles, causing corruption
13726          * of register and memory space. Only certain ICH bridges
13727          * will drive special cycles with non-zero data during the
13728          * address phase which can fall within the 5703's address
13729          * range. This is not an ICH bug as the PCI spec allows
13730          * non-zero address during special cycles. However, only
13731          * these ICH bridges are known to drive non-zero addresses
13732          * during special cycles.
13733          *
13734          * Since special cycles do not cross PCI bridges, we only
13735          * enable this workaround if the 5703 is on the secondary
13736          * bus of these ICH bridges.
13737          */
13738         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13739             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13740                 static struct tg3_dev_id {
13741                         u32     vendor;
13742                         u32     device;
13743                         u32     rev;
13744                 } ich_chipsets[] = {
13745                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13746                           PCI_ANY_ID },
13747                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13748                           PCI_ANY_ID },
13749                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13750                           0xa },
13751                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13752                           PCI_ANY_ID },
13753                         { },
13754                 };
13755                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13756                 struct pci_dev *bridge = NULL;
13757
13758                 while (pci_id->vendor != 0) {
13759                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13760                                                 bridge);
13761                         if (!bridge) {
13762                                 pci_id++;
13763                                 continue;
13764                         }
13765                         if (pci_id->rev != PCI_ANY_ID) {
13766                                 if (bridge->revision > pci_id->rev)
13767                                         continue;
13768                         }
13769                         if (bridge->subordinate &&
13770                             (bridge->subordinate->number ==
13771                              tp->pdev->bus->number)) {
13772                                 tg3_flag_set(tp, ICH_WORKAROUND);
13773                                 pci_dev_put(bridge);
13774                                 break;
13775                         }
13776                 }
13777         }
13778
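        /* 5701 devices behind an Intel PXH bridge are subject to a DMA
         * hardware bug.  Flag the workaround whenever our device sits on
         * a bus within the bridge's subordinate bus range.
         */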
13779         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13780                 static struct tg3_dev_id {
13781                         u32     vendor;
13782                         u32     device;
13783                 } bridge_chipsets[] = {
13784                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13785                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13786                         { },
13787                 };
13788                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13789                 struct pci_dev *bridge = NULL;
13790
13791                 while (pci_id->vendor != 0) {
13792                         bridge = pci_get_device(pci_id->vendor,
13793                                                 pci_id->device,
13794                                                 bridge);
13795                         if (!bridge) {
13796                                 pci_id++;
13797                                 continue;
13798                         }
13799                         if (bridge->subordinate &&
13800                             (bridge->subordinate->number <=
13801                              tp->pdev->bus->number) &&
13802                             (bridge->subordinate->subordinate >=
13803                              tp->pdev->bus->number)) {
13804                                 tg3_flag_set(tp, 5701_DMA_BUG);
13805                                 pci_dev_put(bridge);
13806                                 break;
13807                         }
13808                 }
13809         }
13810
13811         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13812          * DMA addresses > 40-bit.  This bridge may have additional
13813          * 57xx devices behind it, in some 4-port NIC designs for example.
13814          * Any tg3 device found behind the bridge will also need the 40-bit
13815          * DMA workaround.
13816          */
13817         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13818             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13819                 tg3_flag_set(tp, 5780_CLASS);
13820                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13821                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13822         } else {
13823                 struct pci_dev *bridge = NULL;
13824
13825                 do {
13826                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13827                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13828                                                 bridge);
13829                         if (bridge && bridge->subordinate &&
13830                             (bridge->subordinate->number <=
13831                              tp->pdev->bus->number) &&
13832                             (bridge->subordinate->subordinate >=
13833                              tp->pdev->bus->number)) {
13834                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13835                                 pci_dev_put(bridge);
13836                                 break;
13837                         }
13838                 } while (bridge);
13839         }
13840
13841         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13842             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13843                 tp->pdev_peer = tg3_find_peer(tp);
13844
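        /* Establish the chip generation hierarchy.  Each flag set below
         * implies the ones after it: 5717_PLUS -> 57765_PLUS ->
         * 5755_PLUS -> 5750_PLUS -> 5705_PLUS.
         */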
13845         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13846             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13847             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13848                 tg3_flag_set(tp, 5717_PLUS);
13849
13850         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13851             tg3_flag(tp, 5717_PLUS))
13852                 tg3_flag_set(tp, 57765_PLUS);
13853
13854         /* Intentionally exclude ASIC_REV_5906 */
13855         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13856             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13857             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13858             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13859             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13860             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13861             tg3_flag(tp, 57765_PLUS))
13862                 tg3_flag_set(tp, 5755_PLUS);
13863
13864         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13865             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13866             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13867             tg3_flag(tp, 5755_PLUS) ||
13868             tg3_flag(tp, 5780_CLASS))
13869                 tg3_flag_set(tp, 5750_PLUS);
13870
13871         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13872             tg3_flag(tp, 5750_PLUS))
13873                 tg3_flag_set(tp, 5705_PLUS);
13874
13875         /* Determine TSO capabilities */
13876         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13877                 ; /* Do nothing: hardware TSO is broken on the 5719 A0. */
13878         else if (tg3_flag(tp, 57765_PLUS))
13879                 tg3_flag_set(tp, HW_TSO_3);
13880         else if (tg3_flag(tp, 5755_PLUS) ||
13881                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13882                 tg3_flag_set(tp, HW_TSO_2);
13883         else if (tg3_flag(tp, 5750_PLUS)) {
13884                 tg3_flag_set(tp, HW_TSO_1);
13885                 tg3_flag_set(tp, TSO_BUG);
13886                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13887                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13888                         tg3_flag_clear(tp, TSO_BUG);
13889         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13890                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13891                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13892                 tg3_flag_set(tp, TSO_BUG);
13893                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13894                         tp->fw_needed = FIRMWARE_TG3TSO5;
13895                 else
13896                         tp->fw_needed = FIRMWARE_TG3TSO;
13897         }
13898
13899         /* Selectively allow TSO based on operating conditions */
13900         if (tg3_flag(tp, HW_TSO_1) ||
13901             tg3_flag(tp, HW_TSO_2) ||
13902             tg3_flag(tp, HW_TSO_3) ||
13903             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) {
13904                 tg3_flag_set(tp, TSO_CAPABLE);
13905         } else {
13906                 tg3_flag_clear(tp, TSO_CAPABLE);
13907                 tg3_flag_clear(tp, TSO_BUG);
13908                 tp->fw_needed = NULL;
13909         }
13910
13911         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13912                 tp->fw_needed = FIRMWARE_TG3;
13913
13914         tp->irq_max = 1;
13915
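        /* MSI is only trusted on 5750 and newer parts, and even there a
         * few revisions are excluded: the 5750 AX/BX steppings and early
         * single-port 5714 revisions have broken MSI.  MSI-X is limited
         * to 57765 and newer chips.
         */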
13916         if (tg3_flag(tp, 5750_PLUS)) {
13917                 tg3_flag_set(tp, SUPPORT_MSI);
13918                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13919                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13920                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13921                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13922                      tp->pdev_peer == tp->pdev))
13923                         tg3_flag_clear(tp, SUPPORT_MSI);
13924
13925                 if (tg3_flag(tp, 5755_PLUS) ||
13926                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13927                         tg3_flag_set(tp, 1SHOT_MSI);
13928                 }
13929
13930                 if (tg3_flag(tp, 57765_PLUS)) {
13931                         tg3_flag_set(tp, SUPPORT_MSIX);
13932                         tp->irq_max = TG3_IRQ_MAX_VECS;
13933                 }
13934         }
13935
13936         if (tg3_flag(tp, 5755_PLUS))
13937                 tg3_flag_set(tp, SHORT_DMA_BUG);
13938
13939         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13940                 tg3_flag_set(tp, 4K_FIFO_LIMIT);
13941
13942         if (tg3_flag(tp, 5717_PLUS))
13943                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13944
13945         if (tg3_flag(tp, 57765_PLUS) &&
13946             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
13947                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13948
13949         if (!tg3_flag(tp, 5705_PLUS) ||
13950             tg3_flag(tp, 5780_CLASS) ||
13951             tg3_flag(tp, USE_JUMBO_BDFLAG))
13952                 tg3_flag_set(tp, JUMBO_CAPABLE);
13953
13954         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13955                               &pci_state_reg);
13956
13957         if (pci_is_pcie(tp->pdev)) {
13958                 u16 lnkctl;
13959
13960                 tg3_flag_set(tp, PCI_EXPRESS);
13961
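        /* Default to a 4096-byte maximum read request size; the 5719 and
         * 5720 are capped at 2048 bytes, apparently to avoid a DMA FIFO
         * limitation on those chips.
         */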
13962                 tp->pcie_readrq = 4096;
13963                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13964                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13965                         tp->pcie_readrq = 2048;
13966
13967                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13968
13969                 pci_read_config_word(tp->pdev,
13970                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13971                                      &lnkctl);
13972                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13973                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13974                             ASIC_REV_5906) {
13975                                 tg3_flag_clear(tp, HW_TSO_2);
13976                                 tg3_flag_clear(tp, TSO_CAPABLE);
13977                         }
13978                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13979                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13980                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13981                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13982                                 tg3_flag_set(tp, CLKREQ_BUG);
13983                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13984                         tg3_flag_set(tp, L1PLLPD_EN);
13985                 }
13986         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13987                 /* BCM5785 devices are effectively PCIe devices, and should
13988                  * follow PCIe codepaths, but do not have a PCIe capabilities
13989                  * section.
13990                  */
13991                 tg3_flag_set(tp, PCI_EXPRESS);
13992         } else if (!tg3_flag(tp, 5705_PLUS) ||
13993                    tg3_flag(tp, 5780_CLASS)) {
13994                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13995                 if (!tp->pcix_cap) {
13996                         dev_err(&tp->pdev->dev,
13997                                 "Cannot find PCI-X capability, aborting\n");
13998                         return -EIO;
13999                 }
14000
14001                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14002                         tg3_flag_set(tp, PCIX_MODE);
14003         }
14004
14005         /* If we have an AMD 762 or VIA K8T800 chipset, write
14006          * reordering of mailbox register writes by the host
14007          * controller can cause serious problems.  We read back
14008          * after every mailbox register write to force the writes
14009          * to be posted to the chip in order.
14010          */
14011         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14012             !tg3_flag(tp, PCI_EXPRESS))
14013                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14014
14015         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14016                              &tp->pci_cacheline_sz);
14017         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14018                              &tp->pci_lat_timer);
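        /* The 5703 needs a PCI latency timer of at least 64; raise it if
         * the BIOS or firmware left it lower.
         */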
14019         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14020             tp->pci_lat_timer < 64) {
14021                 tp->pci_lat_timer = 64;
14022                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14023                                       tp->pci_lat_timer);
14024         }
14025
14026         /* Important! -- It is critical that the PCI-X hw workaround
14027          * situation is decided before the first MMIO register access.
14028          */
14029         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14030                 /* 5700 BX chips need to have their TX producer index
14031                  * mailboxes written twice to workaround a bug.
14032                  */
14033                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14034
14035                 /* If we are in PCI-X mode, enable register write workaround.
14036                  *
14037                  * The workaround is to use indirect register accesses
14038                  * for all chip writes not to mailbox registers.
14039                  */
14040                 if (tg3_flag(tp, PCIX_MODE)) {
14041                         u32 pm_reg;
14042
14043                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14044
14045                         /* The chip can have its power management PCI config
14046                          * space registers clobbered due to this bug.
14047                          * So explicitly force the chip into D0 here.
14048                          */
14049                         pci_read_config_dword(tp->pdev,
14050                                               tp->pm_cap + PCI_PM_CTRL,
14051                                               &pm_reg);
14052                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14053                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14054                         pci_write_config_dword(tp->pdev,
14055                                                tp->pm_cap + PCI_PM_CTRL,
14056                                                pm_reg);
14057
14058                         /* Also, force SERR#/PERR# in PCI command. */
14059                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14060                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14061                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14062                 }
14063         }
14064
14065         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14066                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14067         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14068                 tg3_flag_set(tp, PCI_32BIT);
14069
14070         /* Chip-specific fixup from Broadcom driver */
14071         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14072             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14073                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14074                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14075         }
14076
14077         /* Default fast path register access methods */
14078         tp->read32 = tg3_read32;
14079         tp->write32 = tg3_write32;
14080         tp->read32_mbox = tg3_read32;
14081         tp->write32_mbox = tg3_write32;
14082         tp->write32_tx_mbox = tg3_write32;
14083         tp->write32_rx_mbox = tg3_write32;
14084
14085         /* Various workaround register access methods */
14086         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14087                 tp->write32 = tg3_write_indirect_reg32;
14088         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14089                  (tg3_flag(tp, PCI_EXPRESS) &&
14090                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14091                 /*
14092                  * Back to back register writes can cause problems on these
14093                  * chips, the workaround is to read back all reg writes
14094                  * except those to mailbox regs.
14095                  *
14096                  * See tg3_write_indirect_reg32().
14097                  */
14098                 tp->write32 = tg3_write_flush_reg32;
14099         }
14100
14101         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14102                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14103                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14104                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14105         }
14106
14107         if (tg3_flag(tp, ICH_WORKAROUND)) {
14108                 tp->read32 = tg3_read_indirect_reg32;
14109                 tp->write32 = tg3_write_indirect_reg32;
14110                 tp->read32_mbox = tg3_read_indirect_mbox;
14111                 tp->write32_mbox = tg3_write_indirect_mbox;
14112                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14113                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14114
14115                 iounmap(tp->regs);
14116                 tp->regs = NULL;
14117
14118                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14119                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14120                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14121         }
14122         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14123                 tp->read32_mbox = tg3_read32_mbox_5906;
14124                 tp->write32_mbox = tg3_write32_mbox_5906;
14125                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14126                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14127         }
14128
14129         if (tp->write32 == tg3_write_indirect_reg32 ||
14130             (tg3_flag(tp, PCIX_MODE) &&
14131              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14132               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14133                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14134
14135         /* The memory arbiter has to be enabled in order for SRAM accesses
14136          * to succeed.  Normally on powerup the tg3 chip firmware will make
14137          * sure it is enabled, but other entities such as system netboot
14138          * code might disable it.
14139          */
14140         val = tr32(MEMARB_MODE);
14141         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14142
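        /* Determine our PCI function number.  In PCI-X mode it is
         * reported in the PCI-X status register; otherwise derive it
         * from the low bits of devfn.
         */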
14143         if (tg3_flag(tp, PCIX_MODE)) {
14144                 pci_read_config_dword(tp->pdev,
14145                                       tp->pcix_cap + PCI_X_STATUS, &val);
14146                 tp->pci_fn = val & 0x7;
14147         } else {
14148                 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14149         }
14150
14151         /* Get eeprom hw config before calling tg3_set_power_state().
14152          * In particular, the TG3_FLAG_IS_NIC flag must be
14153          * determined before calling tg3_set_power_state() so that
14154          * we know whether or not to switch out of Vaux power.
14155          * When the flag is set, it means that GPIO1 is used for eeprom
14156          * write protect and also implies that it is a LOM where GPIOs
14157          * are not used to switch power.
14158          */
14159         tg3_get_eeprom_hw_cfg(tp);
14160
14161         if (tg3_flag(tp, ENABLE_APE)) {
14162                 /* Allow reads and writes to the
14163                  * APE register and memory space.
14164                  */
14165                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14166                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14167                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14168                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14169                                        pci_state_reg);
14170
14171                 tg3_ape_lock_init(tp);
14172         }
14173
14174         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14175             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14176             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14177             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14178             tg3_flag(tp, 57765_PLUS))
14179                 tg3_flag_set(tp, CPMU_PRESENT);
14180
14181         /* Set up tp->grc_local_ctrl before calling
14182          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14183          * will bring 5700's external PHY out of reset.
14184          * It is also used as eeprom write protect on LOMs.
14185          */
14186         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14187         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14188             tg3_flag(tp, EEPROM_WRITE_PROT))
14189                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14190                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14191         /* Unused GPIO3 must be driven as output on 5752 because there
14192          * are no pull-up resistors on unused GPIO pins.
14193          */
14194         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14195                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14196
14197         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14198             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14199             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14200                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14201
14202         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14203             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14204                 /* Turn off the debug UART. */
14205                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14206                 if (tg3_flag(tp, IS_NIC))
14207                         /* Keep VMain power. */
14208                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14209                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14210         }
14211
14212         /* Switch out of Vaux if it is a NIC */
14213         tg3_pwrsrc_switch_to_vmain(tp);
14214
14215         /* Derive initial jumbo mode from MTU assigned in
14216          * ether_setup() via the alloc_etherdev() call
14217          */
14218         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14219                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14220
14221         /* Determine WakeOnLan speed to use. */
14222         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14223             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14224             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14225             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14226                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14227         } else {
14228                 tg3_flag_set(tp, WOL_SPEED_100MB);
14229         }
14230
14231         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14232                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14233
14234         /* A few boards don't want Ethernet@WireSpeed phy feature */
14235         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14236             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14237              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14238              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14239             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14240             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14241                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14242
14243         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14244             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14245                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14246         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14247                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14248
14249         if (tg3_flag(tp, 5705_PLUS) &&
14250             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14251             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14252             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14253             !tg3_flag(tp, 57765_PLUS)) {
14254                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14255                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14256                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14257                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14258                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14259                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14260                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14261                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14262                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14263                 } else
14264                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14265         }
14266
14267         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14268             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14269                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14270                 if (tp->phy_otp == 0)
14271                         tp->phy_otp = TG3_OTP_DEFAULT;
14272         }
14273
14274         if (tg3_flag(tp, CPMU_PRESENT))
14275                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14276         else
14277                 tp->mi_mode = MAC_MI_MODE_BASE;
14278
14279         tp->coalesce_mode = 0;
14280         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14281             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14282                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14283
14284         /* Set these bits to enable the statistics workaround. */
14285         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14286             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14287             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14288                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14289                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14290         }
14291
14292         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14293             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14294                 tg3_flag_set(tp, USE_PHYLIB);
14295
14296         err = tg3_mdio_init(tp);
14297         if (err)
14298                 return err;
14299
14300         /* Initialize data/descriptor byte/word swapping. */
14301         val = tr32(GRC_MODE);
14302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14303                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14304                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14305                         GRC_MODE_B2HRX_ENABLE |
14306                         GRC_MODE_HTX2B_ENABLE |
14307                         GRC_MODE_HOST_STACKUP);
14308         else
14309                 val &= GRC_MODE_HOST_STACKUP;
14310
14311         tw32(GRC_MODE, val | tp->grc_mode);
14312
14313         tg3_switch_clocks(tp);
14314
14315         /* Clear this out for sanity. */
14316         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14317
14318         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14319                               &pci_state_reg);
14320         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14321             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14322                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14323
14324                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14325                     chiprevid == CHIPREV_ID_5701_B0 ||
14326                     chiprevid == CHIPREV_ID_5701_B2 ||
14327                     chiprevid == CHIPREV_ID_5701_B5) {
14328                         void __iomem *sram_base;
14329
14330                         /* Write some dummy words into the SRAM status block
14331                          * area, see if it reads back correctly.  If the return
14332                          * value is bad, force enable the PCIX workaround.
14333                          */
14334                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14335
14336                         writel(0x00000000, sram_base);
14337                         writel(0x00000000, sram_base + 4);
14338                         writel(0xffffffff, sram_base + 4);
14339                         if (readl(sram_base) != 0x00000000)
14340                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14341                 }
14342         }
14343
14344         udelay(50);
14345         tg3_nvram_init(tp);
14346
14347         grc_misc_cfg = tr32(GRC_MISC_CFG);
14348         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14349
14350         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14351             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14352              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14353                 tg3_flag_set(tp, IS_5788);
14354
14355         if (!tg3_flag(tp, IS_5788) &&
14356             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14357                 tg3_flag_set(tp, TAGGED_STATUS);
14358         if (tg3_flag(tp, TAGGED_STATUS)) {
14359                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14360                                       HOSTCC_MODE_CLRTICK_TXBD);
14361
14362                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14363                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14364                                        tp->misc_host_ctrl);
14365         }
14366
14367         /* Preserve the APE MAC_MODE bits */
14368         if (tg3_flag(tp, ENABLE_APE))
14369                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14370         else
14371                 tp->mac_mode = TG3_DEF_MAC_MODE;
14372
14373         /* these are limited to 10/100 only */
14374         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14375              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14376             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14377              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14378              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14379               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14380               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14381             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14382              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14383               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14384               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14385             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14386             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14387             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14388             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14389                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14390
14391         err = tg3_phy_probe(tp);
14392         if (err) {
14393                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14394                 /* ... but do not return immediately ... */
14395                 tg3_mdio_fini(tp);
14396         }
14397
14398         tg3_read_vpd(tp);
14399         tg3_read_fw_ver(tp);
14400
14401         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14402                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14403         } else {
14404                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14405                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14406                 else
14407                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14408         }
14409
14410         /* 5700 {AX,BX} chips have a broken status block link
14411          * change bit implementation, so we must use the
14412          * status register in those cases.
14413          */
14414         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14415                 tg3_flag_set(tp, USE_LINKCHG_REG);
14416         else
14417                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14418
14419         /* The led_ctrl is set during tg3_phy_probe; here we might
14420          * have to force the link status polling mechanism based
14421          * upon subsystem IDs.
14422          */
14423         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14424             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14425             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14426                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14427                 tg3_flag_set(tp, USE_LINKCHG_REG);
14428         }
14429
14430         /* For all SERDES we poll the MAC status register. */
14431         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14432                 tg3_flag_set(tp, POLL_SERDES);
14433         else
14434                 tg3_flag_clear(tp, POLL_SERDES);
14435
14436         tp->rx_offset = NET_IP_ALIGN;
14437         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
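        /* The 5701 in PCI-X mode cannot DMA received packets to 2-byte
         * aligned buffers, so leave the payload unaligned and, on
         * architectures without efficient unaligned access, copy every
         * received packet into an aligned buffer.
         */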
14438         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14439             tg3_flag(tp, PCIX_MODE)) {
14440                 tp->rx_offset = 0;
14441 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14442                 tp->rx_copy_thresh = ~(u16)0;
14443 #endif
14444         }
14445
14446         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14447         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14448         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14449
14450         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14451
14452         /* Increment the rx prod index on the rx std ring by at most
14453          * 8 for these chips to workaround hw errata.
14454          * 8 for these chips to work around hardware errata.
14455         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14456             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14457             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14458                 tp->rx_std_max_post = 8;
14459
14460         if (tg3_flag(tp, ASPM_WORKAROUND))
14461                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14462                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14463
14464         return err;
14465 }
14466
14467 #ifdef CONFIG_SPARC
14468 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14469 {
14470         struct net_device *dev = tp->dev;
14471         struct pci_dev *pdev = tp->pdev;
14472         struct device_node *dp = pci_device_to_OF_node(pdev);
14473         const unsigned char *addr;
14474         int len;
14475
14476         addr = of_get_property(dp, "local-mac-address", &len);
14477         if (addr && len == 6) {
14478                 memcpy(dev->dev_addr, addr, 6);
14479                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14480                 return 0;
14481         }
14482         return -ENODEV;
14483 }
14484
14485 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14486 {
14487         struct net_device *dev = tp->dev;
14488
14489         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14490         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14491         return 0;
14492 }
14493 #endif
14494
14495 static int __devinit tg3_get_device_address(struct tg3 *tp)
14496 {
14497         struct net_device *dev = tp->dev;
14498         u32 hi, lo, mac_offset;
14499         int addr_ok = 0;
14500
14501 #ifdef CONFIG_SPARC
14502         if (!tg3_get_macaddr_sparc(tp))
14503                 return 0;
14504 #endif
14505
14506         mac_offset = 0x7c;
14507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14508             tg3_flag(tp, 5780_CLASS)) {
14509                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14510                         mac_offset = 0xcc;
14511                 if (tg3_nvram_lock(tp))
14512                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14513                 else
14514                         tg3_nvram_unlock(tp);
14515         } else if (tg3_flag(tp, 5717_PLUS)) {
14516                 if (tp->pci_fn & 1)
14517                         mac_offset = 0xcc;
14518                 if (tp->pci_fn > 1)
14519                         mac_offset += 0x18c;
14520         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14521                 mac_offset = 0x10;
14522
14523         /* First try to get it from MAC address mailbox. */
14524         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
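        /* A high word of 0x484b (ASCII "HK") marks a MAC address the
         * bootcode has stored in the mailbox.
         */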
14525         if ((hi >> 16) == 0x484b) {
14526                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14527                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14528
14529                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14530                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14531                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14532                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14533                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14534
14535                 /* Some old bootcode may report a 0 MAC address in SRAM */
14536                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14537         }
14538         if (!addr_ok) {
14539                 /* Next, try NVRAM. */
14540                 if (!tg3_flag(tp, NO_NVRAM) &&
14541                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14542                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14543                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14544                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14545                 } else {
14546                         /* Finally just fetch it out of the MAC control regs. */
14548                         hi = tr32(MAC_ADDR_0_HIGH);
14549                         lo = tr32(MAC_ADDR_0_LOW);
14550
14551                         dev->dev_addr[5] = lo & 0xff;
14552                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14553                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14554                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14555                         dev->dev_addr[1] = hi & 0xff;
14556                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14557                 }
14558         }
14559
14560         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14561 #ifdef CONFIG_SPARC
14562                 if (!tg3_get_default_macaddr_sparc(tp))
14563                         return 0;
14564 #endif
14565                 return -EINVAL;
14566         }
14567         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14568         return 0;
14569 }
14570
14571 #define BOUNDARY_SINGLE_CACHELINE       1
14572 #define BOUNDARY_MULTI_CACHELINE        2
14573
14574 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14575 {
14576         int cacheline_size;
14577         u8 byte;
14578         int goal;
14579
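        /* PCI_CACHE_LINE_SIZE is in units of 32-bit words; zero means
         * the BIOS never programmed it, so assume a 1024-byte line.
         */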
14580         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14581         if (byte == 0)
14582                 cacheline_size = 1024;
14583         else
14584                 cacheline_size = (int) byte * 4;
14585
14586         /* On 5703 and later chips, the boundary bits have no
14587          * effect.
14588          */
14589         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14590             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14591             !tg3_flag(tp, PCI_EXPRESS))
14592                 goto out;
14593
14594 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14595         goal = BOUNDARY_MULTI_CACHELINE;
14596 #elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14597         goal = BOUNDARY_SINGLE_CACHELINE;
14598 #else
14599         goal = 0;
14600 #endif
14603
14604         if (tg3_flag(tp, 57765_PLUS)) {
14605                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14606                 goto out;
14607         }
14608
14609         if (!goal)
14610                 goto out;
14611
14612         /* PCI controllers on most RISC systems tend to disconnect
14613          * when a device tries to burst across a cache-line boundary.
14614          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14615          *
14616          * Unfortunately, for PCI-E there are only limited
14617          * write-side controls for this, and thus for reads
14618          * we will still get the disconnects.  We'll also waste
14619          * these PCI cycles for both read and write on chips other
14620          * than the 5700 and 5701, which do not implement the
14621          * boundary bits.
14622          */
14623         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14624                 switch (cacheline_size) {
14625                 case 16:
14626                 case 32:
14627                 case 64:
14628                 case 128:
14629                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14630                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14631                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14632                         } else {
14633                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14634                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14635                         }
14636                         break;
14637
14638                 case 256:
14639                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14640                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14641                         break;
14642
14643                 default:
14644                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14645                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14646                         break;
14647                 }
14648         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14649                 switch (cacheline_size) {
14650                 case 16:
14651                 case 32:
14652                 case 64:
14653                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14654                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14655                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14656                                 break;
14657                         }
14658                         /* fallthrough */
14659                 case 128:
14660                 default:
14661                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14662                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14663                         break;
14664                 }
14665         } else {
14666                 switch (cacheline_size) {
14667                 case 16:
14668                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14669                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14670                                         DMA_RWCTRL_WRITE_BNDRY_16);
14671                                 break;
14672                         }
14673                         /* fallthrough */
14674                 case 32:
14675                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14676                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14677                                         DMA_RWCTRL_WRITE_BNDRY_32);
14678                                 break;
14679                         }
14680                         /* fallthrough */
14681                 case 64:
14682                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14683                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14684                                         DMA_RWCTRL_WRITE_BNDRY_64);
14685                                 break;
14686                         }
14687                         /* fallthrough */
14688                 case 128:
14689                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14690                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14691                                         DMA_RWCTRL_WRITE_BNDRY_128);
14692                                 break;
14693                         }
14694                         /* fallthrough */
14695                 case 256:
14696                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14697                                 DMA_RWCTRL_WRITE_BNDRY_256);
14698                         break;
14699                 case 512:
14700                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14701                                 DMA_RWCTRL_WRITE_BNDRY_512);
14702                         break;
14703                 case 1024:
14704                 default:
14705                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14706                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14707                         break;
14708                 }
14709         }
14710
14711 out:
14712         return val;
14713 }
14714
14715 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14716 {
14717         struct tg3_internal_buffer_desc test_desc;
14718         u32 sram_dma_descs;
14719         int i, ret;
14720
14721         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14722
14723         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14724         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14725         tw32(RDMAC_STATUS, 0);
14726         tw32(WDMAC_STATUS, 0);
14727
14728         tw32(BUFMGR_MODE, 0);
14729         tw32(FTQ_RESET, 0);
14730
14731         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14732         test_desc.addr_lo = buf_dma & 0xffffffff;
14733         test_desc.nic_mbuf = 0x00002100;
14734         test_desc.len = size;
14735
14736         /*
14737          * HP ZX1 systems saw test failures on 5701 cards running at
14738          * 33MHz the *second* time the tg3 driver was loaded after an
14739          * initial scan.
14740          *
14741          * Broadcom tells me:
14742          *   ...the DMA engine is connected to the GRC block and a DMA
14743          *   reset may affect the GRC block in some unpredictable way...
14744          *   The behavior of resets to individual blocks has not been tested.
14745          *
14746          * Broadcom noted the GRC reset will also reset all sub-components.
14747          */
14748         if (to_device) {
14749                 test_desc.cqid_sqid = (13 << 8) | 2;
14750
14751                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14752                 udelay(40);
14753         } else {
14754                 test_desc.cqid_sqid = (16 << 8) | 7;
14755
14756                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14757                 udelay(40);
14758         }
14759         test_desc.flags = 0x00000005;
14760
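        /* Copy the descriptor into NIC SRAM one word at a time through
         * the PCI memory window, then restore the window base address.
         */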
14761         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14762                 u32 val;
14763
14764                 val = *(((u32 *)&test_desc) + i);
14765                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14766                                        sram_dma_descs + (i * sizeof(u32)));
14767                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14768         }
14769         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14770
14771         if (to_device)
14772                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14773         else
14774                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14775
14776         ret = -ENODEV;
14777         for (i = 0; i < 40; i++) {
14778                 u32 val;
14779
14780                 if (to_device)
14781                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14782                 else
14783                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14784                 if ((val & 0xffff) == sram_dma_descs) {
14785                         ret = 0;
14786                         break;
14787                 }
14788
14789                 udelay(100);
14790         }
14791
14792         return ret;
14793 }
14794
14795 #define TEST_BUFFER_SIZE        0x2000
14796
14797 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14798         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14799         { },
14800 };
14801
14802 static int __devinit tg3_test_dma(struct tg3 *tp)
14803 {
14804         dma_addr_t buf_dma;
14805         u32 *buf, saved_dma_rwctrl;
14806         int ret = 0;
14807
14808         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14809                                  &buf_dma, GFP_KERNEL);
14810         if (!buf) {
14811                 ret = -ENOMEM;
14812                 goto out_nofree;
14813         }
14814
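        /* 0x7 and 0x6 are the PCI bus command codes for Memory Write and
         * Memory Read, respectively.
         */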
14815         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14816                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14817
14818         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14819
14820         if (tg3_flag(tp, 57765_PLUS))
14821                 goto out;
14822
14823         if (tg3_flag(tp, PCI_EXPRESS)) {
14824                 /* DMA read watermark not used on PCIE */
14825                 tp->dma_rwctrl |= 0x00180000;
14826         } else if (!tg3_flag(tp, PCIX_MODE)) {
14827                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14828                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14829                         tp->dma_rwctrl |= 0x003f0000;
14830                 else
14831                         tp->dma_rwctrl |= 0x003f000f;
14832         } else {
14833                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14834                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14835                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14836                         u32 read_water = 0x7;
14837
14838                         /* If the 5704 is behind the EPB bridge, we can
14839                          * do the less restrictive ONE_DMA workaround for
14840                          * better performance.
14841                          */
14842                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14843                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14844                                 tp->dma_rwctrl |= 0x8000;
14845                         else if (ccval == 0x6 || ccval == 0x7)
14846                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14847
14848                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14849                                 read_water = 4;
14850                         /* Set bit 23 to enable PCIX hw bug fix */
14851                         tp->dma_rwctrl |=
14852                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14853                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14854                                 (1 << 23);
14855                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14856                         /* 5780 always in PCIX mode */
14857                         tp->dma_rwctrl |= 0x00144000;
14858                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14859                         /* 5714 always in PCIX mode */
14860                         tp->dma_rwctrl |= 0x00148000;
14861                 } else {
14862                         tp->dma_rwctrl |= 0x001b000f;
14863                 }
14864         }
14865
14866         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14867             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14868                 tp->dma_rwctrl &= 0xfffffff0;
14869
14870         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14871             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14872                 /* Remove this if it causes problems for some boards. */
14873                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14874
14875                 /* On 5700/5701 chips, we need to set this bit.
14876                  * Otherwise the chip will issue cacheline transactions
14877                  * to streamable DMA memory without all of the byte
14878                  * enables turned on.  This is an error on several
14879                  * RISC PCI controllers, in particular sparc64.
14880                  *
14881                  * On 5703/5704 chips, this bit has been reassigned
14882                  * a different meaning.  In particular, it is used
14883                  * on those chips to enable a PCI-X workaround.
14884                  */
14885                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14886         }
14887
14888         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14889
14890 #if 0
14891         /* Unneeded, already done by tg3_get_invariants.  */
14892         tg3_switch_clocks(tp);
14893 #endif
14894
14895         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14896             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14897                 goto out;
14898
14899         /* It is best to perform the DMA test with the maximum write
14900          * burst size to expose the 5700/5701 write DMA bug.
14901          */
14902         saved_dma_rwctrl = tp->dma_rwctrl;
14903         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14904         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14905
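        /* Fill the buffer with a known pattern, DMA it to the chip and
         * back again, and verify the result.  On corruption, retry once
         * with the most conservative 16-byte write boundary before
         * giving up.
         */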
14906         while (1) {
14907                 u32 *p = buf, i;
14908
14909                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14910                         p[i] = i;
14911
14912                 /* Send the buffer to the chip. */
14913                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14914                 if (ret) {
14915                         dev_err(&tp->pdev->dev,
14916                                 "%s: Buffer write failed. err = %d\n",
14917                                 __func__, ret);
14918                         break;
14919                 }
14920
14921 #if 0
14922                 /* validate data reached card RAM correctly. */
14923                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14924                         u32 val;
14925                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14926                         if (le32_to_cpu(val) != p[i]) {
14927                                 dev_err(&tp->pdev->dev,
14928                                         "%s: Buffer corrupted on device! "
14929                                         "(%u != %u)\n", __func__, le32_to_cpu(val), i);
14930                                 /* ret = -ENODEV here? */
14931                         }
14932                         p[i] = 0;
14933                 }
14934 #endif
14935                 /* Now read it back. */
14936                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14937                 if (ret) {
14938                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14939                                 "err = %d\n", __func__, ret);
14940                         break;
14941                 }
14942
14943                 /* Verify it. */
14944                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14945                         if (p[i] == i)
14946                                 continue;
14947
14948                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14949                             DMA_RWCTRL_WRITE_BNDRY_16) {
14950                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14951                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14952                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14953                                 break;
14954                         } else {
14955                                 dev_err(&tp->pdev->dev,
14956                                         "%s: Buffer corrupted on read back! "
14957                                         "(%d != %d)\n", __func__, p[i], i);
14958                                 ret = -ENODEV;
14959                                 goto out;
14960                         }
14961                 }
14962
14963                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14964                         /* Success. */
14965                         ret = 0;
14966                         break;
14967                 }
14968         }
14969         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14970             DMA_RWCTRL_WRITE_BNDRY_16) {
14971                 /* DMA test passed without adjusting DMA boundary,
14972                  * now look for chipsets that are known to expose the
14973                  * DMA bug without failing the test.
14974                  */
14975                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14976                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14977                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14978                 } else {
14979                         /* Safe to use the calculated DMA boundary. */
14980                         tp->dma_rwctrl = saved_dma_rwctrl;
14981                 }
14982
14983                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14984         }
14985
14986 out:
14987         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14988 out_nofree:
14989         return ret;
14990 }
14991
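      /* Pick the buffer-manager mbuf watermarks for this chip family;
       * separate values are used for standard and jumbo frames.  These
       * thresholds are programmed into the buffer manager registers
       * when the hardware is brought up.
       */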
14992 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14993 {
14994         if (tg3_flag(tp, 57765_PLUS)) {
14995                 tp->bufmgr_config.mbuf_read_dma_low_water =
14996                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14997                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14998                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14999                 tp->bufmgr_config.mbuf_high_water =
15000                         DEFAULT_MB_HIGH_WATER_57765;
15001
15002                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15003                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15004                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15005                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15006                 tp->bufmgr_config.mbuf_high_water_jumbo =
15007                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15008         } else if (tg3_flag(tp, 5705_PLUS)) {
15009                 tp->bufmgr_config.mbuf_read_dma_low_water =
15010                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15011                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15012                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15013                 tp->bufmgr_config.mbuf_high_water =
15014                         DEFAULT_MB_HIGH_WATER_5705;
15015                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15016                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15017                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15018                         tp->bufmgr_config.mbuf_high_water =
15019                                 DEFAULT_MB_HIGH_WATER_5906;
15020                 }
15021
15022                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15023                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15024                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15025                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15026                 tp->bufmgr_config.mbuf_high_water_jumbo =
15027                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15028         } else {
15029                 tp->bufmgr_config.mbuf_read_dma_low_water =
15030                         DEFAULT_MB_RDMA_LOW_WATER;
15031                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15032                         DEFAULT_MB_MACRX_LOW_WATER;
15033                 tp->bufmgr_config.mbuf_high_water =
15034                         DEFAULT_MB_HIGH_WATER;
15035
15036                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15037                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15038                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15039                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15040                 tp->bufmgr_config.mbuf_high_water_jumbo =
15041                         DEFAULT_MB_HIGH_WATER_JUMBO;
15042         }
15043
15044         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15045         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15046 }
15047
15048 static char * __devinit tg3_phy_string(struct tg3 *tp)
15049 {
15050         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15051         case TG3_PHY_ID_BCM5400:        return "5400";
15052         case TG3_PHY_ID_BCM5401:        return "5401";
15053         case TG3_PHY_ID_BCM5411:        return "5411";
15054         case TG3_PHY_ID_BCM5701:        return "5701";
15055         case TG3_PHY_ID_BCM5703:        return "5703";
15056         case TG3_PHY_ID_BCM5704:        return "5704";
15057         case TG3_PHY_ID_BCM5705:        return "5705";
15058         case TG3_PHY_ID_BCM5750:        return "5750";
15059         case TG3_PHY_ID_BCM5752:        return "5752";
15060         case TG3_PHY_ID_BCM5714:        return "5714";
15061         case TG3_PHY_ID_BCM5780:        return "5780";
15062         case TG3_PHY_ID_BCM5755:        return "5755";
15063         case TG3_PHY_ID_BCM5787:        return "5787";
15064         case TG3_PHY_ID_BCM5784:        return "5784";
15065         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15066         case TG3_PHY_ID_BCM5906:        return "5906";
15067         case TG3_PHY_ID_BCM5761:        return "5761";
15068         case TG3_PHY_ID_BCM5718C:       return "5718C";
15069         case TG3_PHY_ID_BCM5718S:       return "5718S";
15070         case TG3_PHY_ID_BCM57765:       return "57765";
15071         case TG3_PHY_ID_BCM5719C:       return "5719C";
15072         case TG3_PHY_ID_BCM5720C:       return "5720C";
15073         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15074         case 0:                 return "serdes";
15075         default:                return "unknown";
15076         }
15077 }
15078
15079 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15080 {
15081         if (tg3_flag(tp, PCI_EXPRESS)) {
15082                 strcpy(str, "PCI Express");
15083                 return str;
15084         } else if (tg3_flag(tp, PCIX_MODE)) {
15085                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15086
15087                 strcpy(str, "PCIX:");
15088
15089                 if ((clock_ctrl == 7) ||
15090                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15091                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15092                         strcat(str, "133MHz");
15093                 else if (clock_ctrl == 0)
15094                         strcat(str, "33MHz");
15095                 else if (clock_ctrl == 2)
15096                         strcat(str, "50MHz");
15097                 else if (clock_ctrl == 4)
15098                         strcat(str, "66MHz");
15099                 else if (clock_ctrl == 6)
15100                         strcat(str, "100MHz");
15101         } else {
15102                 strcpy(str, "PCI:");
15103                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15104                         strcat(str, "66MHz");
15105                 else
15106                         strcat(str, "33MHz");
15107         }
15108         if (tg3_flag(tp, PCI_32BIT))
15109                 strcat(str, ":32-bit");
15110         else
15111                 strcat(str, ":64-bit");
15112         return str;
15113 }
15114
15115 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15116 {
15117         struct pci_dev *peer;
15118         unsigned int func, devnr = tp->pdev->devfn & ~7;
15119
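              /* devfn packs the device and function numbers; the low three
               * bits are the function.  Masking them off yields function 0
               * of our own slot, so the loop below scans all eight
               * functions for the sibling port.
               */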
15120         for (func = 0; func < 8; func++) {
15121                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15122                 if (peer && peer != tp->pdev)
15123                         break;
15124                 pci_dev_put(peer);
15125         }
15126         /* 5704 can be configured in single-port mode; set peer to
15127          * tp->pdev in that case.
15128          */
15129         if (!peer) {
15130                 peer = tp->pdev;
15131                 return peer;
15132         }
15133
15134         /*
15135          * We don't need to keep the refcount elevated; there's no way
15136          * to remove one half of this device without removing the other.
15137          */
15138         pci_dev_put(peer);
15139
15140         return peer;
15141 }
15142
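      /* Set up the default interrupt coalescing parameters.  These are
       * what ethtool reports for this device until the user changes
       * them (the struct is pre-tagged with ETHTOOL_GCOALESCE below).
       */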
15143 static void __devinit tg3_init_coal(struct tg3 *tp)
15144 {
15145         struct ethtool_coalesce *ec = &tp->coal;
15146
15147         memset(ec, 0, sizeof(*ec));
15148         ec->cmd = ETHTOOL_GCOALESCE;
15149         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15150         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15151         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15152         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15153         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15154         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15155         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15156         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15157         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15158
15159         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15160                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15161                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15162                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15163                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15164                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15165         }
15166
15167         if (tg3_flag(tp, 5705_PLUS)) {
15168                 ec->rx_coalesce_usecs_irq = 0;
15169                 ec->tx_coalesce_usecs_irq = 0;
15170                 ec->stats_block_coalesce_usecs = 0;
15171         }
15172 }
15173
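      /* ndo_fix_features/ndo_set_features hook this driver into the
       * kernel's unified offload-configuration path (what "ethtool -K"
       * drives), letting tg3 adjust a requested feature set before it
       * is applied.
       */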
15174 static const struct net_device_ops tg3_netdev_ops = {
15175         .ndo_open               = tg3_open,
15176         .ndo_stop               = tg3_close,
15177         .ndo_start_xmit         = tg3_start_xmit,
15178         .ndo_get_stats64        = tg3_get_stats64,
15179         .ndo_validate_addr      = eth_validate_addr,
15180         .ndo_set_multicast_list = tg3_set_rx_mode,
15181         .ndo_set_mac_address    = tg3_set_mac_addr,
15182         .ndo_do_ioctl           = tg3_ioctl,
15183         .ndo_tx_timeout         = tg3_tx_timeout,
15184         .ndo_change_mtu         = tg3_change_mtu,
15185         .ndo_fix_features       = tg3_fix_features,
15186         .ndo_set_features       = tg3_set_features,
15187 #ifdef CONFIG_NET_POLL_CONTROLLER
15188         .ndo_poll_controller    = tg3_poll_controller,
15189 #endif
15190 };
15191
15192 static int __devinit tg3_init_one(struct pci_dev *pdev,
15193                                   const struct pci_device_id *ent)
15194 {
15195         struct net_device *dev;
15196         struct tg3 *tp;
15197         int i, err, pm_cap;
15198         u32 sndmbx, rcvmbx, intmbx;
15199         char str[40];
15200         u64 dma_mask, persist_dma_mask;
15201         u32 features = 0;
15202
15203         printk_once(KERN_INFO "%s\n", version);
15204
15205         err = pci_enable_device(pdev);
15206         if (err) {
15207                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15208                 return err;
15209         }
15210
15211         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15212         if (err) {
15213                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15214                 goto err_out_disable_pdev;
15215         }
15216
15217         pci_set_master(pdev);
15218
15219         /* Find power-management capability. */
15220         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15221         if (pm_cap == 0) {
15222                 dev_err(&pdev->dev,
15223                         "Cannot find Power Management capability, aborting\n");
15224                 err = -EIO;
15225                 goto err_out_free_res;
15226         }
15227
15228         err = pci_set_power_state(pdev, PCI_D0);
15229         if (err) {
15230                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15231                 goto err_out_free_res;
15232         }
15233
15234         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15235         if (!dev) {
15236                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15237                 err = -ENOMEM;
15238                 goto err_out_power_down;
15239         }
15240
15241         SET_NETDEV_DEV(dev, &pdev->dev);
15242
15243         tp = netdev_priv(dev);
15244         tp->pdev = pdev;
15245         tp->dev = dev;
15246         tp->pm_cap = pm_cap;
15247         tp->rx_mode = TG3_DEF_RX_MODE;
15248         tp->tx_mode = TG3_DEF_TX_MODE;
15249
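              /* tg3_debug is a NETIF_MSG_* bitmap taken from the module
               * parameter; e.g., assuming the parameter keeps the name
               * used elsewhere in this file, "modprobe tg3 tg3_debug=0x3"
               * would enable only the DRV and PROBE messages.
               */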
15250         if (tg3_debug > 0)
15251                 tp->msg_enable = tg3_debug;
15252         else
15253                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15254
15255         /* The word/byte swap controls here control register access byte
15256          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15257          * setting below.
15258          */
15259         tp->misc_host_ctrl =
15260                 MISC_HOST_CTRL_MASK_PCI_INT |
15261                 MISC_HOST_CTRL_WORD_SWAP |
15262                 MISC_HOST_CTRL_INDIR_ACCESS |
15263                 MISC_HOST_CTRL_PCISTATE_RW;
15264
15265         /* The NONFRM (non-frame) byte/word swap controls take effect
15266          * on descriptor entries, anything which isn't packet data.
15267          *
15268          * The StrongARM chips on the board (one for tx, one for rx)
15269          * are running in big-endian mode.
15270          */
15271         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15272                         GRC_MODE_WSWAP_NONFRM_DATA);
15273 #ifdef __BIG_ENDIAN
15274         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15275 #endif
15276         spin_lock_init(&tp->lock);
15277         spin_lock_init(&tp->indirect_lock);
15278         INIT_WORK(&tp->reset_task, tg3_reset_task);
15279
15280         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15281         if (!tp->regs) {
15282                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15283                 err = -ENOMEM;
15284                 goto err_out_free_dev;
15285         }
15286
15287         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15288             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15289             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15290             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15291             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15292             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15293             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15294             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15295                 tg3_flag_set(tp, ENABLE_APE);
15296                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15297                 if (!tp->aperegs) {
15298                         dev_err(&pdev->dev,
15299                                 "Cannot map APE registers, aborting\n");
15300                         err = -ENOMEM;
15301                         goto err_out_iounmap;
15302                 }
15303         }
15304
15305         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15306         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15307
15308         dev->ethtool_ops = &tg3_ethtool_ops;
15309         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15310         dev->netdev_ops = &tg3_netdev_ops;
15311         dev->irq = pdev->irq;
15312
15313         err = tg3_get_invariants(tp);
15314         if (err) {
15315                 dev_err(&pdev->dev,
15316                         "Problem fetching invariants of chip, aborting\n");
15317                 goto err_out_apeunmap;
15318         }
15319
15320         /* The EPB bridge inside 5714, 5715, and 5780 and any
15321          * device behind the EPB cannot support DMA addresses > 40-bit.
15322          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15323          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15324          * do DMA address check in tg3_start_xmit().
15325          */
15326         if (tg3_flag(tp, IS_5788))
15327                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15328         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15329                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
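                      /* With highmem, streaming buffers can still land
                       * above the 40-bit limit, so use a 64-bit streaming
                       * mask and rely on the address check in
                       * tg3_start_xmit() (see the comment above).
                       */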
15330 #ifdef CONFIG_HIGHMEM
15331                 dma_mask = DMA_BIT_MASK(64);
15332 #endif
15333         } else
15334                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15335
15336         /* Configure DMA attributes. */
15337         if (dma_mask > DMA_BIT_MASK(32)) {
15338                 err = pci_set_dma_mask(pdev, dma_mask);
15339                 if (!err) {
15340                         features |= NETIF_F_HIGHDMA;
15341                         err = pci_set_consistent_dma_mask(pdev,
15342                                                           persist_dma_mask);
15343                         if (err < 0) {
15344                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15345                                         "DMA for consistent allocations\n");
15346                                 goto err_out_apeunmap;
15347                         }
15348                 }
15349         }
15350         if (err || dma_mask == DMA_BIT_MASK(32)) {
15351                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15352                 if (err) {
15353                         dev_err(&pdev->dev,
15354                                 "No usable DMA configuration, aborting\n");
15355                         goto err_out_apeunmap;
15356                 }
15357         }
15358
15359         tg3_init_bufmgr_config(tp);
15360
15361         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15362
15363         /* 5700 B0 chips do not support checksumming correctly due
15364          * to hardware bugs.
15365          */
15366         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15367                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15368
15369                 if (tg3_flag(tp, 5755_PLUS))
15370                         features |= NETIF_F_IPV6_CSUM;
15371         }
15372
15373         /* TSO is on by default on chips that support hardware TSO.
15374          * Firmware TSO on older chips gives lower performance, so it
15375          * is off by default, but can be enabled using ethtool.
15376          */
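              /* For example, assuming the interface came up as eth0:
               *
               *   ethtool -K eth0 tso on
               */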
15377         if ((tg3_flag(tp, HW_TSO_1) ||
15378              tg3_flag(tp, HW_TSO_2) ||
15379              tg3_flag(tp, HW_TSO_3)) &&
15380             (features & NETIF_F_IP_CSUM))
15381                 features |= NETIF_F_TSO;
15382         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15383                 if (features & NETIF_F_IPV6_CSUM)
15384                         features |= NETIF_F_TSO6;
15385                 if (tg3_flag(tp, HW_TSO_3) ||
15386                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15387                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15388                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15389                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15390                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15391                         features |= NETIF_F_TSO_ECN;
15392         }
15393
15394         dev->features |= features;
15395         dev->vlan_features |= features;
15396
15397         /*
15398          * Add loopback capability only for a subset of devices that support
15399          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15400          * loopback for the remaining devices.
15401          */
15402         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15403             !tg3_flag(tp, CPMU_PRESENT))
15404                 /* Add the loopback capability */
15405                 features |= NETIF_F_LOOPBACK;
15406
15407         dev->hw_features |= features;
15408
15409         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15410             !tg3_flag(tp, TSO_CAPABLE) &&
15411             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15412                 tg3_flag_set(tp, MAX_RXPEND_64);
15413                 tp->rx_pending = 63;
15414         }
15415
15416         err = tg3_get_device_address(tp);
15417         if (err) {
15418                 dev_err(&pdev->dev,
15419                         "Could not obtain valid ethernet address, aborting\n");
15420                 goto err_out_apeunmap;
15421         }
15422
15423         /*
15424          * Reset the chip in case a UNDI or EFI driver did not shut it
15425          * down cleanly.  Otherwise the DMA self test will enable WDMAC
15426          * and we'll see (spurious) pending DMA on the PCI bus at that point.
15427          */
15428         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15429             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15430                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15431                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15432         }
15433
15434         err = tg3_test_dma(tp);
15435         if (err) {
15436                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15437                 goto err_out_apeunmap;
15438         }
15439
15440         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15441         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15442         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
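              /* Walk the per-vector mailbox registers.  The first four
               * interrupt mailboxes are spaced 8 bytes apart, the rest 4,
               * hence the stride change after the fourth vector below.
               */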
15443         for (i = 0; i < tp->irq_max; i++) {
15444                 struct tg3_napi *tnapi = &tp->napi[i];
15445
15446                 tnapi->tp = tp;
15447                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15448
15449                 tnapi->int_mbox = intmbx;
15450                 if (i < 4)
15451                         intmbx += 0x8;
15452                 else
15453                         intmbx += 0x4;
15454
15455                 tnapi->consmbox = rcvmbx;
15456                 tnapi->prodmbox = sndmbx;
15457
15458                 if (i)
15459                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15460                 else
15461                         tnapi->coal_now = HOSTCC_MODE_NOW;
15462
15463                 if (!tg3_flag(tp, SUPPORT_MSIX))
15464                         break;
15465
15466                 /*
15467                  * If we support MSIX, we'll be using RSS.  If we're using
15468                  * RSS, the first vector only handles link interrupts and the
15469                  * remaining vectors handle rx and tx interrupts.  Reuse the
15470                  * mailbox values for the next iteration.  The values we set up
15471                  * above are still useful for the single vectored mode.
15472                  */
15473                 if (!i)
15474                         continue;
15475
15476                 rcvmbx += 0x8;
15477
15478                 if (sndmbx & 0x4)
15479                         sndmbx -= 0x4;
15480                 else
15481                         sndmbx += 0xc;
15482         }
15483
15484         tg3_init_coal(tp);
15485
15486         pci_set_drvdata(pdev, dev);
15487
15488         if (tg3_flag(tp, 5717_PLUS)) {
15489                 /* Put the device back into a low-power state */
15490                 tg3_frob_aux_power(tp, false);
15491         }
15492
15493         err = register_netdev(dev);
15494         if (err) {
15495                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15496                 goto err_out_apeunmap;
15497         }
15498
15499         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15500                     tp->board_part_number,
15501                     tp->pci_chip_rev_id,
15502                     tg3_bus_string(tp, str),
15503                     dev->dev_addr);
15504
15505         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15506                 struct phy_device *phydev;
15507                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15508                 netdev_info(dev,
15509                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15510                             phydev->drv->name, dev_name(&phydev->dev));
15511         } else {
15512                 char *ethtype;
15513
15514                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15515                         ethtype = "10/100Base-TX";
15516                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15517                         ethtype = "1000Base-SX";
15518                 else
15519                         ethtype = "10/100/1000Base-T";
15520
15521                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15522                             "(WireSpeed[%d], EEE[%d])\n",
15523                             tg3_phy_string(tp), ethtype,
15524                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15525                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15526         }
15527
15528         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15529                     (dev->features & NETIF_F_RXCSUM) != 0,
15530                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15531                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15532                     tg3_flag(tp, ENABLE_ASF) != 0,
15533                     tg3_flag(tp, TSO_CAPABLE) != 0);
15534         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15535                     tp->dma_rwctrl,
15536                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15537                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15538
15539         pci_save_state(pdev);
15540
15541         return 0;
15542
15543 err_out_apeunmap:
15544         if (tp->aperegs) {
15545                 iounmap(tp->aperegs);
15546                 tp->aperegs = NULL;
15547         }
15548
15549 err_out_iounmap:
15550         if (tp->regs) {
15551                 iounmap(tp->regs);
15552                 tp->regs = NULL;
15553         }
15554
15555 err_out_free_dev:
15556         free_netdev(dev);
15557
15558 err_out_power_down:
15559         pci_set_power_state(pdev, PCI_D3hot);
15560
15561 err_out_free_res:
15562         pci_release_regions(pdev);
15563
15564 err_out_disable_pdev:
15565         pci_disable_device(pdev);
15566         pci_set_drvdata(pdev, NULL);
15567         return err;
15568 }
15569
15570 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15571 {
15572         struct net_device *dev = pci_get_drvdata(pdev);
15573
15574         if (dev) {
15575                 struct tg3 *tp = netdev_priv(dev);
15576
15577                 if (tp->fw)
15578                         release_firmware(tp->fw);
15579
15580                 cancel_work_sync(&tp->reset_task);
15581
15582                 if (!tg3_flag(tp, USE_PHYLIB)) {
15583                         tg3_phy_fini(tp);
15584                         tg3_mdio_fini(tp);
15585                 }
15586
15587                 unregister_netdev(dev);
15588                 if (tp->aperegs) {
15589                         iounmap(tp->aperegs);
15590                         tp->aperegs = NULL;
15591                 }
15592                 if (tp->regs) {
15593                         iounmap(tp->regs);
15594                         tp->regs = NULL;
15595                 }
15596                 free_netdev(dev);
15597                 pci_release_regions(pdev);
15598                 pci_disable_device(pdev);
15599                 pci_set_drvdata(pdev, NULL);
15600         }
15601 }
15602
15603 #ifdef CONFIG_PM_SLEEP
15604 static int tg3_suspend(struct device *device)
15605 {
15606         struct pci_dev *pdev = to_pci_dev(device);
15607         struct net_device *dev = pci_get_drvdata(pdev);
15608         struct tg3 *tp = netdev_priv(dev);
15609         int err;
15610
15611         if (!netif_running(dev))
15612                 return 0;
15613
15614         flush_work_sync(&tp->reset_task);
15615         tg3_phy_stop(tp);
15616         tg3_netif_stop(tp);
15617
15618         del_timer_sync(&tp->timer);
15619
15620         tg3_full_lock(tp, 1);
15621         tg3_disable_ints(tp);
15622         tg3_full_unlock(tp);
15623
15624         netif_device_detach(dev);
15625
15626         tg3_full_lock(tp, 0);
15627         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15628         tg3_flag_clear(tp, INIT_COMPLETE);
15629         tg3_full_unlock(tp);
15630
15631         err = tg3_power_down_prepare(tp);
15632         if (err) {
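                      /* Power-down preparation failed; restart the
                       * hardware and reattach the netdev so the device
                       * stays usable.
                       */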
15633                 int err2;
15634
15635                 tg3_full_lock(tp, 0);
15636
15637                 tg3_flag_set(tp, INIT_COMPLETE);
15638                 err2 = tg3_restart_hw(tp, 1);
15639                 if (err2)
15640                         goto out;
15641
15642                 tp->timer.expires = jiffies + tp->timer_offset;
15643                 add_timer(&tp->timer);
15644
15645                 netif_device_attach(dev);
15646                 tg3_netif_start(tp);
15647
15648 out:
15649                 tg3_full_unlock(tp);
15650
15651                 if (!err2)
15652                         tg3_phy_start(tp);
15653         }
15654
15655         return err;
15656 }
15657
15658 static int tg3_resume(struct device *device)
15659 {
15660         struct pci_dev *pdev = to_pci_dev(device);
15661         struct net_device *dev = pci_get_drvdata(pdev);
15662         struct tg3 *tp = netdev_priv(dev);
15663         int err;
15664
15665         if (!netif_running(dev))
15666                 return 0;
15667
15668         netif_device_attach(dev);
15669
15670         tg3_full_lock(tp, 0);
15671
15672         tg3_flag_set(tp, INIT_COMPLETE);
15673         err = tg3_restart_hw(tp, 1);
15674         if (err)
15675                 goto out;
15676
15677         tp->timer.expires = jiffies + tp->timer_offset;
15678         add_timer(&tp->timer);
15679
15680         tg3_netif_start(tp);
15681
15682 out:
15683         tg3_full_unlock(tp);
15684
15685         if (!err)
15686                 tg3_phy_start(tp);
15687
15688         return err;
15689 }
15690
15691 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15692 #define TG3_PM_OPS (&tg3_pm_ops)
15693
15694 #else
15695
15696 #define TG3_PM_OPS NULL
15697
15698 #endif /* CONFIG_PM_SLEEP */
15699
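      /* PCI error recovery: the PCI core calls error_detected() when a
       * bus error is reported, slot_reset() after the slot has been
       * reset, and resume() once normal traffic may flow again.
       */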
15700 /**
15701  * tg3_io_error_detected - called when PCI error is detected
15702  * @pdev: Pointer to PCI device
15703  * @state: The current pci connection state
15704  *
15705  * This function is called after a PCI bus error affecting
15706  * this device has been detected.
15707  */
15708 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15709                                               pci_channel_state_t state)
15710 {
15711         struct net_device *netdev = pci_get_drvdata(pdev);
15712         struct tg3 *tp = netdev_priv(netdev);
15713         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15714
15715         netdev_info(netdev, "PCI I/O error detected\n");
15716
15717         rtnl_lock();
15718
15719         if (!netif_running(netdev))
15720                 goto done;
15721
15722         tg3_phy_stop(tp);
15723
15724         tg3_netif_stop(tp);
15725
15726         del_timer_sync(&tp->timer);
15727         tg3_flag_clear(tp, RESTART_TIMER);
15728
15729         /* Want to make sure that the reset task doesn't run */
15730         cancel_work_sync(&tp->reset_task);
15731         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
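              /* RESTART_TIMER is cleared a second time below in case the
               * reset task set it again before cancel_work_sync() finished.
               */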
15732         tg3_flag_clear(tp, RESTART_TIMER);
15733
15734         netif_device_detach(netdev);
15735
15736         /* Clean up software state, even if MMIO is blocked */
15737         tg3_full_lock(tp, 0);
15738         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15739         tg3_full_unlock(tp);
15740
15741 done:
15742         if (state == pci_channel_io_perm_failure)
15743                 err = PCI_ERS_RESULT_DISCONNECT;
15744         else
15745                 pci_disable_device(pdev);
15746
15747         rtnl_unlock();
15748
15749         return err;
15750 }
15751
15752 /**
15753  * tg3_io_slot_reset - called after the pci bus has been reset.
15754  * @pdev: Pointer to PCI device
15755  *
15756  * Restart the card from scratch, as if from a cold-boot.
15757  * At this point, the card has experienced a hard reset,
15758  * followed by fixups by BIOS, and has its config space
15759  * set up identically to what it was at cold boot.
15760  */
15761 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15762 {
15763         struct net_device *netdev = pci_get_drvdata(pdev);
15764         struct tg3 *tp = netdev_priv(netdev);
15765         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15766         int err;
15767
15768         rtnl_lock();
15769
15770         if (pci_enable_device(pdev)) {
15771                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15772                 goto done;
15773         }
15774
15775         pci_set_master(pdev);
15776         pci_restore_state(pdev);
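              /* Save state again so any later restore starts from this
               * freshly restored configuration.
               */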
15777         pci_save_state(pdev);
15778
15779         if (!netif_running(netdev)) {
15780                 rc = PCI_ERS_RESULT_RECOVERED;
15781                 goto done;
15782         }
15783
15784         err = tg3_power_up(tp);
15785         if (err)
15786                 goto done;
15787
15788         rc = PCI_ERS_RESULT_RECOVERED;
15789
15790 done:
15791         rtnl_unlock();
15792
15793         return rc;
15794 }
15795
15796 /**
15797  * tg3_io_resume - called when traffic can start flowing again.
15798  * @pdev: Pointer to PCI device
15799  *
15800  * This callback is called when the error recovery driver tells
15801  * us that it's OK to resume normal operation.
15802  */
15803 static void tg3_io_resume(struct pci_dev *pdev)
15804 {
15805         struct net_device *netdev = pci_get_drvdata(pdev);
15806         struct tg3 *tp = netdev_priv(netdev);
15807         int err;
15808
15809         rtnl_lock();
15810
15811         if (!netif_running(netdev))
15812                 goto done;
15813
15814         tg3_full_lock(tp, 0);
15815         tg3_flag_set(tp, INIT_COMPLETE);
15816         err = tg3_restart_hw(tp, 1);
15817         tg3_full_unlock(tp);
15818         if (err) {
15819                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15820                 goto done;
15821         }
15822
15823         netif_device_attach(netdev);
15824
15825         tp->timer.expires = jiffies + tp->timer_offset;
15826         add_timer(&tp->timer);
15827
15828         tg3_netif_start(tp);
15829
15830         tg3_phy_start(tp);
15831
15832 done:
15833         rtnl_unlock();
15834 }
15835
15836 static struct pci_error_handlers tg3_err_handler = {
15837         .error_detected = tg3_io_error_detected,
15838         .slot_reset     = tg3_io_slot_reset,
15839         .resume         = tg3_io_resume
15840 };
15841
15842 static struct pci_driver tg3_driver = {
15843         .name           = DRV_MODULE_NAME,
15844         .id_table       = tg3_pci_tbl,
15845         .probe          = tg3_init_one,
15846         .remove         = __devexit_p(tg3_remove_one),
15847         .err_handler    = &tg3_err_handler,
15848         .driver.pm      = TG3_PM_OPS,
15849 };
15850
15851 static int __init tg3_init(void)
15852 {
15853         return pci_register_driver(&tg3_driver);
15854 }
15855
15856 static void __exit tg3_cleanup(void)
15857 {
15858         pci_unregister_driver(&tg3_driver);
15859 }
15860
15861 module_init(tg3_init);
15862 module_exit(tg3_cleanup);